diff --git a/.gitignore b/.gitignore
index 71cc3c382..b41881a8e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,12 +37,12 @@ infrabox/test/github-review/results.xml
 src/pyinfrabox/.coverage
 src/pyinfrabox/coverage.xml
 src/pyinfrabox/results.xml
-src/controller/pkg/client
-src/controller/controller
-src/controller/vendor
 src/services/gcp/vendor
 src/services/gcp/gcp
 src/services/gcp/tmp/_output/
 src/services/namespace/vendor
 src/services/namespace/namespace
 src/services/namespace/tmp/_output/
+src/controller/vendor
+src/controller/namespace
+src/controller/tmp/_output/
diff --git a/deploy/infrabox-function/Chart.yaml b/deploy/infrabox-function/Chart.yaml
new file mode 100644
index 000000000..ec19a3892
--- /dev/null
+++ b/deploy/infrabox-function/Chart.yaml
@@ -0,0 +1,2 @@
+name: infrabox-function
+version: 1
diff --git a/deploy/infrabox-function/templates/_helpers.tpl b/deploy/infrabox-function/templates/_helpers.tpl
new file mode 100644
index 000000000..9e1dcd3ca
--- /dev/null
+++ b/deploy/infrabox-function/templates/_helpers.tpl
@@ -0,0 +1,293 @@
+{{ define "env_database" }}
+-
+  name: INFRABOX_DATABASE_USER
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-postgres
+      key: username
+-
+  name: INFRABOX_DATABASE_PASSWORD
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-postgres
+      key: password
+-
+  name: INFRABOX_DATABASE_HOST
+  value: {{ default "localhost" .Values.storage.postgres.host | quote }}
+-
+  name: INFRABOX_DATABASE_DB
+  value: {{ default "infrabox" .Values.storage.postgres.db | quote }}
+-
+  name: INFRABOX_DATABASE_PORT
+  value: {{ default 5432 .Values.storage.postgres.port | quote }}
+-
+  name: INFRABOX_STORAGE_CLOUDSQL_ENABLED
+  value: {{ .Values.storage.cloudsql.enabled | quote }}
+{{ if .Values.storage.cloudsql.enabled }}
+-
+  name: INFRABOX_STORAGE_CLOUDSQL_INSTANCE_CONNECTION_NAME
+  value: {{ .Values.storage.cloudsql.instance_connection_name }}
+{{ end }}
+{{ end }}
+
+{{ define "volumes_rsa" }}
+-
+  name: rsa-key
+  secret:
+    secretName: infrabox-rsa
+{{ end }}
+
+{{ define "mounts_rsa_private" }}
+-
+  name: rsa-key
+  mountPath: "/var/run/secrets/infrabox.net/rsa/id_rsa"
+  subPath: id_rsa
+  readOnly: true
+{{ end }}
+
+{{ define "mounts_rsa_public" }}
+-
+  name: rsa-key
+  mountPath: "/var/run/secrets/infrabox.net/rsa/id_rsa.pub"
+  subPath: id_rsa.pub
+  readOnly: true
+{{ end }}
+
+{{ define "volumes_database" }}
+{{ if .Values.storage.cloudsql.enabled }}
+-
+  name: cloudsql-instance-credentials
+  secret:
+    secretName: infrabox-cloudsql-instance-credentials
+-
+  name: cloudsql
+  emptyDir:
+{{ end }}
+{{ end }}
+
+{{ define "env_gcs" }}
+-
+  name: INFRABOX_STORAGE_GCS_ENABLED
+  value: {{ .Values.storage.gcs.enabled | quote }}
+{{ if .Values.storage.gcs.enabled }}
+-
+  name: INFRABOX_STORAGE_GCS_BUCKET
+  value: {{ .Values.storage.gcs.bucket }}
+-
+  name: GOOGLE_APPLICATION_CREDENTIALS
+  value: /etc/infrabox/gcs/gcs_service_account.json
+{{ end }}
+{{ end }}
+
+{{ define "env_s3" }}
+-
+  name: INFRABOX_STORAGE_S3_ENABLED
+  value: {{ .Values.storage.s3.enabled | quote }}
+{{ if .Values.storage.s3.enabled }}
+-
+  name: INFRABOX_STORAGE_S3_ENDPOINT
+  value: {{ .Values.storage.s3.endpoint }}
+-
+  name: INFRABOX_STORAGE_S3_PORT
+  value: {{ .Values.storage.s3.port | quote }}
+-
+  name: INFRABOX_STORAGE_S3_REGION
+  value: {{ .Values.storage.s3.region | quote }}
+-
+  name: INFRABOX_STORAGE_S3_SECURE
+  value: {{ .Values.storage.s3.secure | quote }}
+-
+  name: INFRABOX_STORAGE_S3_BUCKET
+  value: {{ default "infrabox" .Values.storage.s3.bucket | quote }}
+-
+  name: INFRABOX_STORAGE_S3_ACCESS_KEY
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-s3-credentials
+      key: accessKey
+-
+  name: INFRABOX_STORAGE_S3_SECRET_KEY
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-s3-credentials
+      key: secretKey
+{{ end }}
+{{ end }}
+
+{{ define "env_azure" }}
+-
+  name: INFRABOX_STORAGE_AZURE_ENABLED
+  value: {{ .Values.storage.azure.enabled | quote }}
+{{ if .Values.storage.azure.enabled }}
+-
+  name: INFRABOX_STORAGE_AZURE_ACCOUNT_NAME
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-azure-credentials
+      key: account-name
+-
+  name: INFRABOX_STORAGE_AZURE_ACCOUNT_KEY
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-azure-credentials
+      key: account-key
+{{ end }}
+{{ end }}
+
+{{ define "env_github" }}
+-
+  name: INFRABOX_GITHUB_ENABLED
+  value: {{ .Values.github.enabled | quote }}
+{{ if .Values.github.enabled }}
+-
+  name: INFRABOX_GITHUB_LOGIN_ENABLED
+  value: {{ .Values.github.login.enabled | quote }}
+-
+  name: INFRABOX_GITHUB_API_URL
+  value: {{ default "https://api.github.com" .Values.github.api_url }}
+-
+  name: INFRABOX_GITHUB_LOGIN_URL
+  value: {{ default "https://github.com/login" .Values.github.login.url }}
+-
+  name: INFRABOX_GITHUB_LOGIN_ALLOWED_ORGANIZATIONS
+  value: {{ default "" .Values.github.login.allowed_organizations | quote }}
+{{ end }}
+{{ end }}
+
+{{ define "env_gerrit" }}
+-
+  name: INFRABOX_GERRIT_ENABLED
+  value: {{ .Values.gerrit.enabled | quote }}
+{{ if .Values.gerrit.enabled }}
+-
+  name: INFRABOX_GERRIT_HOSTNAME
+  value: {{ required "gerrit.hostname is required" .Values.gerrit.hostname }}
+-
+  name: INFRABOX_GERRIT_KEY_FILENAME
+  value: /root/.ssh/id_rsa
+-
+  name: INFRABOX_GERRIT_USERNAME
+  value: {{ required "gerrit.username is required" .Values.gerrit.username }}
+-
+  name: INFRABOX_GERRIT_PORT
+  value: {{ default "29418" .Values.gerrit.port | quote }}
+{{ end }}
+{{ end }}
+
+{{ define "env_ldap" }}
+-
+  name: INFRABOX_ACCOUNT_LDAP_ENABLED
+  value: {{ .Values.account.ldap.enabled | quote }}
+{{ if .Values.account.ldap.enabled }}
+-
+  name: INFRABOX_ACCOUNT_LDAP_URL
+  value: {{ required "account.ldap.url is required" .Values.account.ldap.url }}
+-
+  name: INFRABOX_ACCOUNT_LDAP_BASE
+  value: {{ required "account.ldap.base is required" .Values.account.ldap.base }}
+-
+  name: INFRABOX_ACCOUNT_LDAP_DN
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-ldap
+      key: dn
+-
+  name: INFRABOX_ACCOUNT_LDAP_PASSWORD
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-ldap
+      key: password
+{{ end }}
+{{ end }}
+
+
+{{ define "env_github_secrets" }}
+{{ if .Values.github.enabled }}
+-
+  name: INFRABOX_GITHUB_CLIENT_ID
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-github
+      key: client_id
+-
+  name: INFRABOX_GITHUB_CLIENT_SECRET
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-github
+      key: client_secret
+-
+  name: INFRABOX_GITHUB_WEBHOOK_SECRET
+  valueFrom:
+    secretKeyRef:
+      name: infrabox-github
+      key: webhook_secret
+{{ end }}
+{{ end }}
+
+{{ define "env_general" }}
+-
+  name: INFRABOX_GENERAL_LOG_STACKDRIVER
+  value: {{ default "false" .Values.general.log.stackdriver | quote }}
+-
+  name: INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES
+  value: {{ default "false" .Values.general.dont_check_certificates | quote }}
+-
+  name: INFRABOX_GENERAL_WORKER_NAMESPACE
+  value: {{ default "infrabox-worker" .Values.general.worker_namespace }}
+-
+  name: INFRABOX_ROOT_URL
+  value: {{ .Values.root_url }}
+-
+  name: INFRABOX_GENERAL_REPORT_ISSUE_URL
+  value: {{ .Values.general.report_issue_url }}
+-
+  name: INFRABOX_GENERAL_DOCKER_REGISTRY
+  value: {{ .Values.general.docker_registry }}
+{{ end }}
+
"env_docker_registry" }} +- + name: INFRABOX_DOCKER_REGISTRY_ADMIN_USERNAME + value: "admin" +- + name: INFRABOX_DOCKER_REGISTRY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: infrabox-admin + key: password +{{ end }} + +{{ define "containers_database" }} +{{ if .Values.storage.cloudsql.enabled }} +- + image: gcr.io/cloudsql-docker/gce-proxy:1.09 + name: cloudsql-proxy + command: ["/cloud_sql_proxy", "--dir=/cloudsql", + "-instances={{ .Values.storage.cloudsql.instance_connection_name }}=tcp:5432", + "-credential_file=/secrets/cloudsql/credentials.json"] + volumeMounts: + - name: cloudsql-instance-credentials + mountPath: /secrets/cloudsql + readOnly: true + - name: cloudsql + mountPath: /cloudsql +{{ end }} +{{ end }} + +{{ define "env_job" }} +- + name: INFRABOX_JOB_MAX_OUTPUT_SIZE + value: {{ default "104857600" .Values.job.max_output_size | quote }} +- + name: INFRABOX_JOB_SECURITY_CONTEXT_CAPABILITIES_ENABLED + value: {{ default "false" .Values.job.security_context.capabilities.enabled | quote }} +{{ end }} + +{{ define "env_kubernetes" }} +- + name: INFRABOX_KUBERNETES_MASTER_HOST + value: {{ default "kubernetes.default.svc.cluster.local" .Values.general.kubernetes_master_host }} +- + name: INFRABOX_KUBERNETES_MASTER_PORT + value: {{ default 443 .Values.general.kubernetes_master_port | quote }} +{{ end }} diff --git a/deploy/infrabox-function/templates/function_crd.yaml b/deploy/infrabox-function/templates/function_crd.yaml new file mode 100644 index 000000000..1c83aa864 --- /dev/null +++ b/deploy/infrabox-function/templates/function_crd.yaml @@ -0,0 +1,65 @@ +apiVersion: core.infrabox.net/v1alpha1 +kind: IBFunction +metadata: + name: infrabox-default-function +spec: + image: {{ required "general.docker_registry is required" .Values.general.docker_registry }}/{{ default "job" .Values.job.image }}:{{ required "job.tag is required" .Values.job.tag }} + securityContext: + privileged: true + resources: + limits: + cpu: 1000m + requests: + cpu: 1000m + memory: 1Gi + env: + {{ include "env_general" . | indent 4 }} + {{ include "env_job" . | indent 4 }} + {{ include "env_gerrit" . 
+  -
+    name: INFRABOX_LOCAL_CACHE_ENABLED
+    value: {{ .Values.local_cache.enabled | quote }}
+  -
+    name: INFRABOX_SERVICE
+    value: infrabox-default-function
+  -
+    name: INFRABOX_VERSION
+    value: "unknown"
+  volumeMounts:
+  -
+    mountPath: /etc/docker/daemon.json
+    name: dockerd-config
+    subPath: daemon.json
+  -
+    name: data-dir
+    mountPath: "/data"
+{{ if .Values.local_cache.enabled }}
+  -
+    mountPath: /local-cache
+    name: local-cache
+{{ end }}
+{{ if .Values.gerrit.enabled }}
+  -
+    mountPath: /tmp/gerrit
+    name: gerrit-ssh
+{{ end }}
+  volumes:
+  -
+    name: data-dir
+    emptyDir: {}
+  -
+    name: dockerd-config
+    configMap:
+      name: infrabox-dockerd-config
+{{ if .Values.local_cache.enabled }}
+  -
+    name: local-cache
+    hostPath:
+      path: {{ default "/tmp/infrabox/local_cache" .Values.local_cache.host_path }}
+{{ end }}
+{{ if .Values.gerrit.enabled }}
+  -
+    name: gerrit-ssh
+    secret:
+      secretName: infrabox-gerrit-ssh
+{{ end }}
diff --git a/deploy/infrabox-function/templates/pipeline_crd.yaml b/deploy/infrabox-function/templates/pipeline_crd.yaml
new file mode 100644
index 000000000..b6f097573
--- /dev/null
+++ b/deploy/infrabox-function/templates/pipeline_crd.yaml
@@ -0,0 +1,8 @@
+apiVersion: core.infrabox.net/v1alpha1
+kind: IBPipeline
+metadata:
+  name: infrabox-default-pipeline
+spec:
+  steps:
+  - functionName: infrabox-default-function
+    name: run
diff --git a/deploy/infrabox/templates/_helpers.tpl b/deploy/infrabox/templates/_helpers.tpl
index 42e22ba90..9e1dcd3ca 100644
--- a/deploy/infrabox/templates/_helpers.tpl
+++ b/deploy/infrabox/templates/_helpers.tpl
@@ -274,27 +274,10 @@
 {{ end }}
 {{ end }}
 
-{{ define "env_local_cache" }}
--
-  name: INFRABOX_LOCAL_CACHE_ENABLED
-  value: {{ .Values.local_cache.enabled | quote }}
-{{ if .Values.local_cache.enabled }}
--
-  name: INFRABOX_LOCAL_CACHE_HOST_PATH
-  value: {{ default "/tmp/infrabox/local_cache" .Values.local_cache.host_path }}
-{{ end }}
-{{ end }}
-
 {{ define "env_job" }}
 -
   name: INFRABOX_JOB_MAX_OUTPUT_SIZE
   value: {{ default "104857600" .Values.job.max_output_size | quote }}
--
-  name: INFRABOX_JOB_MOUNT_DOCKER_SOCKET
-  value: {{ default "false" .Values.job.mount_docker_socket | quote }}
--
-  name: INFRABOX_JOB_USE_HOST_DOCKER_DAEMON
-  value: {{ default "false" .Values.job.use_host_docker_daemon | quote }}
 -
   name: INFRABOX_JOB_SECURITY_CONTEXT_CAPABILITIES_ENABLED
   value: {{ default "false" .Values.job.security_context.capabilities.enabled | quote }}
diff --git a/deploy/infrabox/templates/api/deployment.yaml b/deploy/infrabox/templates/api/deployment.yaml
index 6abbe7149..f424045a8 100644
--- a/deploy/infrabox/templates/api/deployment.yaml
+++ b/deploy/infrabox/templates/api/deployment.yaml
@@ -37,10 +37,6 @@ spec:
                 -
                     name: gerrit-ssh
                     mountPath: /tmp/gerrit
                     readOnly: true
-                resources:
-                    requests:
-                        memory: "256Mi"
-                        cpu: "200m"
 {{ end }}
             -
                 name: api
@@ -89,10 +85,6 @@ spec:
                 -
                     name: INFRABOX_ACCOUNT_SIGNUP_ENABLED
                     value: {{ .Values.account.signup.enabled | quote }}
-                resources:
-                    requests:
-                        memory: "1024Mi"
-                        cpu: "1000m"
             volumes:
 {{ include "volumes_database" . | indent 16 }}
 {{ include "volumes_rsa" . | indent 16 }}
diff --git a/deploy/infrabox/templates/controller/deployment.yaml b/deploy/infrabox/templates/controller/deployment.yaml
new file mode 100644
index 000000000..1dad27a5a
--- /dev/null
+++ b/deploy/infrabox/templates/controller/deployment.yaml
@@ -0,0 +1,31 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: infrabox-controller
+  namespace: {{ default "infrabox-system" .Values.general.system_namespace }}
+  labels:
+    app: infrabox-controller
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: infrabox-controller
+    spec:
+      terminationGracePeriodSeconds: 0
+      {{ if .Values.general.rbac.enabled }}
+      serviceAccountName: infrabox-scheduler
+      {{ end }}
+      containers:
+      -
+        name: controller
+        image: {{ required "general.docker_registry is required" .Values.general.docker_registry }}/{{ default "controller" .Values.controller.image }}:{{ required "controller.tag is required" .Values.controller.tag }}
+        imagePullPolicy: Always
+        env:
+        -
+          name: WATCH_NAMESPACE
+          value: {{ default "infrabox-worker" .Values.general.worker_namespace }}
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "200m"
diff --git a/deploy/infrabox/templates/controller/function_crd.yaml b/deploy/infrabox/templates/controller/function_crd.yaml
new file mode 100644
index 000000000..c7bb8ebfd
--- /dev/null
+++ b/deploy/infrabox/templates/controller/function_crd.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: ibfunctions.core.infrabox.net
+spec:
+  group: core.infrabox.net
+  names:
+    kind: IBFunction
+    listKind: IBFunctionList
+    plural: ibfunctions
+    singular: ibfunction
+  scope: Cluster
+  version: v1alpha1
diff --git a/deploy/infrabox/templates/controller/function_invocation_crd.yaml b/deploy/infrabox/templates/controller/function_invocation_crd.yaml
new file mode 100644
index 000000000..3a4e08a92
--- /dev/null
+++ b/deploy/infrabox/templates/controller/function_invocation_crd.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: ibfunctioninvocations.core.infrabox.net
+spec:
+  group: core.infrabox.net
+  names:
+    kind: IBFunctionInvocation
+    listKind: IBFunctionInvocationList
+    plural: ibfunctioninvocations
+    singular: ibfunctioninvocation
+  scope: Namespaced
+  version: v1alpha1
diff --git a/deploy/infrabox/templates/controller/pipeline_crd.yaml b/deploy/infrabox/templates/controller/pipeline_crd.yaml
new file mode 100644
index 000000000..51a4c3430
--- /dev/null
+++ b/deploy/infrabox/templates/controller/pipeline_crd.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: ibpipelines.core.infrabox.net
+spec:
+  group: core.infrabox.net
+  names:
+    kind: IBPipeline
+    listKind: IBPipelineList
+    plural: ibpipelines
+    singular: ibpipeline
+  scope: Cluster
+  version: v1alpha1
diff --git a/deploy/infrabox/templates/controller/pipeline_invocation_crd.yaml b/deploy/infrabox/templates/controller/pipeline_invocation_crd.yaml
new file mode 100644
index 000000000..97620c01b
--- /dev/null
+++ b/deploy/infrabox/templates/controller/pipeline_invocation_crd.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: ibpipelineinvocations.core.infrabox.net
+spec:
+  group: core.infrabox.net
+  names:
+    kind: IBPipelineInvocation
+    listKind: IBPipelineInvocationList
+    plural: ibpipelineinvocations
+    singular: ibpipelineinvocation
+  scope: Namespaced
+  version: v1alpha1
diff --git a/deploy/infrabox/templates/dockerd-config-map.yaml b/deploy/infrabox/templates/dockerd-config-map.yaml
index 63129fda5..284fa52a0 100644
--- a/deploy/infrabox/templates/dockerd-config-map.yaml
+++ b/deploy/infrabox/templates/dockerd-config-map.yaml
@@ -2,6 +2,6 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: infrabox-dockerd-config
-  namespace: {{ default "infrabox-worker" .Values.general.system_namespace }}
+  namespace: {{ default "infrabox-worker" .Values.general.worker_namespace }}
 data:
 {{ (.Files.Glob "config/docker/*").AsConfig | indent 2 }}
diff --git a/deploy/infrabox/templates/job-controller/crd.yaml b/deploy/infrabox/templates/job-controller/crd.yaml
deleted file mode 100644
index 480d66be6..000000000
--- a/deploy/infrabox/templates/job-controller/crd.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: ibjobs.core.infrabox.net
-spec:
-  group: core.infrabox.net
-  version: v1alpha1
-  names:
-    kind: IBJob
-    plural: ibjobs
-  scope: Namespaced
diff --git a/deploy/infrabox/templates/job-controller/deployment.yaml b/deploy/infrabox/templates/job-controller/deployment.yaml
deleted file mode 100644
index 2ff19857d..000000000
--- a/deploy/infrabox/templates/job-controller/deployment.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-    name: infrabox-controller
-    namespace: {{ default "infrabox-system" .Values.general.system_namespace }}
-    labels:
-        app: infrabox-controller
-spec:
-    replicas: 1
-    template:
-        metadata:
-            labels:
-                app: infrabox-controller
-        spec:
-            {{ if .Values.general.rbac.enabled }}
-            serviceAccountName: infrabox-scheduler
-            {{ end }}
-            containers:
-            -
-                name: controller
-                image: {{ required "general.docker_registry is required" .Values.general.docker_registry }}/{{ default "controller" .Values.controller.image }}:{{ required "controller.tag is required" .Values.controller.tag }}
-                imagePullPolicy: Always
-                env:
-                {{ include "env_gerrit" . | indent 16 }}
-                {{ include "env_local_cache" . | indent 16 }}
-                {{ include "env_job" . | indent 16 }}
-                {{ include "env_general" . | indent 16 }}
-                {{ include "env_kubernetes" . | indent 16 }}
-                -
-                    name: INFRABOX_ROOT_URL
-                    value: {{ required "root_url is required" .Values.root_url }}
-                -
-                    name: INFRABOX_SERVICE
-                    value: {{ default "controller" .Values.controller.image }}
-                -
-                    name: INFRABOX_VERSION
-                    value: {{ required "controller.tag is required" .Values.controller.tag }}
-                resources:
-                    requests:
-                        memory: "256Mi"
-                        cpu: "200m"
-                volumeMounts:
-                -
-                    mountPath: /etc/docker
-                    name: dockerd-config
-                {{ include "mounts_rsa_private" . | indent 16 }}
-                volumes:
-                {{ include "volumes_rsa" . | indent 16 }}
-                -
-                    name: dockerd-config
-                    configMap:
-                        name: infrabox-dockerd-config
diff --git a/deploy/infrabox/templates/scheduler/deployment.yaml b/deploy/infrabox/templates/scheduler/deployment.yaml
index d4947764a..7465972d0 100644
--- a/deploy/infrabox/templates/scheduler/deployment.yaml
+++ b/deploy/infrabox/templates/scheduler/deployment.yaml
@@ -39,6 +39,7 @@ spec:
         labels:
             app: infrabox-scheduler
     spec:
+        terminationGracePeriodSeconds: 0
 {{ if .Values.general.rbac.enabled }}
         serviceAccountName: infrabox-scheduler
 {{ end }}
@@ -59,9 +60,6 @@ spec:
                 imagePullPolicy: Always
                 env:
 {{ include "env_database" . | indent 16 }}
-{{ include "env_gerrit" . | indent 16 }}
-{{ include "env_local_cache" . | indent 16 }}
-{{ include "env_job" . | indent 16 }}
 {{ include "env_general" . | indent 16 }}
 {{ include "env_kubernetes" . | indent 16 }}
                 -
@@ -80,15 +78,8 @@ spec:
                     name: INFRABOX_CLUSTER_LABELS
                     value: {{ .Values.cluster.labels }}
                 volumeMounts:
-                -
-                    mountPath: /etc/docker
-                    name: dockerd-config
 {{ include "mounts_rsa_private" . | indent 16 }}
                 volumes:
 {{ include "volumes_database" . | indent 16 }}
 {{ include "volumes_rsa" . | indent 16 }}
-                -
-                    name: dockerd-config
-                    configMap:
-                        name: infrabox-dockerd-config
 {{ end }}
diff --git a/deploy/infrabox/values.yaml b/deploy/infrabox/values.yaml
index 2b2a409a0..40ef5da81 100644
--- a/deploy/infrabox/values.yaml
+++ b/deploy/infrabox/values.yaml
@@ -193,11 +193,14 @@ gerrit:
     # port: 29418
 
 job:
+    # name of the job image in your general.docker_registry
+    # image: job
+
+    # Tag
+    tag: latest
+
     max_output_size: 104857600
 
-    # mount_docker_socket: false
-    # use_host_docker_daemon: false
-
     security_context:
         capabilities:
             enabled: false
diff --git a/deploy/install.py b/deploy/install.py
index c8a7586e5..3c1b2953f 100644
--- a/deploy/install.py
+++ b/deploy/install.py
@@ -8,8 +8,6 @@ import logging
 
 import yaml
 
-from Crypto.PublicKey import RSA
-
 logging.basicConfig(
     format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
     datefmt='%d-%m-%Y:%H:%M:%S',
@@ -399,11 +397,10 @@ def setup_general(self):
         self.create_secret("infrabox-rsa", self.args.general_system_namespace, secret)
 
     def setup_job(self):
-        self.set('job.mount_docker_socket', self.args.job_mount_docker_socket)
-        self.set('job.use_host_docker_daemon', self.args.job_use_host_docker_daemon)
         self.set('job.security_context.capabilities.enabled',
                  self.args.job_security_context_capabilities_enabled)
 
+        self.set('job.tag', self.args.version)
         self.set('job.api.url', self.args.root_url + '/api/job')
         self.set('job.api.tag', self.args.version)
 
@@ -441,6 +438,7 @@ def main(self):
 
         # Copy helm chart
         copy_files(self.args, 'infrabox')
+        copy_files(self.args, 'infrabox-function')
 
         # Load values
         values_path = os.path.join(self.args.o, 'infrabox', 'values.yaml')
@@ -478,6 +476,9 @@ def main(self):
 
         self.config.dump(values_path)
 
+        values_path = os.path.join(self.args.o, 'infrabox-function', 'values.yaml')
+        self.config.dump(values_path)
+
 def main():
     parser = argparse.ArgumentParser(description='Install InfraBox')
     parser.add_argument('-o',
@@ -584,8 +585,6 @@ def main():
     parser.add_argument('--local-cache-host-path')
 
     # Job
-    parser.add_argument('--job-mount-docker-socket', action='store_true', default=False)
-    parser.add_argument('--job-use-host-docker-daemon', action='store_true', default=False)
     parser.add_argument('--job-security-context-capabilities-enabled', action='store_true', default=False)
 
     # Parse options
diff --git a/docs/install/install.md b/docs/install/install.md
index 9dad07071..a5790e071 100644
--- a/docs/install/install.md
+++ b/docs/install/install.md
@@ -75,6 +75,7 @@ InfraBox requires an object store to persist some data like inputs/outputs, cache
 - [Google Cloud Storage (recommended on GCP)](/docs/install/storage/gcs.md)
 - [S3](/docs/install/storage/s3.md)
 - [Minio](/docs/install/storage/minio.md)
+- [Azure](/docs/install/storage/azure.md)
 
 ### Install PostgreSQL
 InfraBox requires a PostgreSQL Database for persisting some data. You have the following options:
@@ -130,6 +131,9 @@ To deploy InfraBox:
     cd /tmp/infrabox-configuration/infrabox
     helm install -n infrabox .
 
+    cd /tmp/infrabox-configuration/infrabox-function
+    helm install -n infrabox-function .
+
 After a few seconds you can open your browser and access `https://`.
 
 [helm]: https://github.com/kubernetes/helm
diff --git a/ib.py b/ib.py
index 78cfed7a4..0716c0d2d 100755
--- a/ib.py
+++ b/ib.py
@@ -22,8 +22,9 @@
     {'name': 'gerrit-review'},
     {'name': 'github-trigger'},
     {'name': 'github-review'},
+    {'name': 'collector-api'},
+    {'name': 'collector-fluentd'},
     {'name': 'job'},
-    {'name': 'job-git'},
     {'name': 'controller'},
     {'name': 'scheduler-kubernetes'},
     {'name': 'api'},
diff --git a/infrabox/generator/deployments.json b/infrabox/generator/deployments.json
index 4bd6b29aa..4ed2a2efc 100644
--- a/infrabox/generator/deployments.json
+++ b/infrabox/generator/deployments.json
@@ -34,7 +34,40 @@
         "cache": {
             "image": true
         }
-
+    }, {
+        "type": "docker",
+        "build_context": "../..",
+        "name": "collector-api",
+        "docker_file": "src/collector-api/Dockerfile",
+        "build_only": true,
+        "resources": { "limits": { "cpu": 1, "memory": 1024 } },
+        "deployments": [{
+            "type": "docker-registry",
+            "host": "quay.io/infrabox",
+            "repository": "collector-api",
+            "username": "infrabox+infrabox_ci",
+            "password": { "$secret": "QUAY_PASSWORD" }
+        }],
+        "cache": {
+            "image": true
+        }
+    }, {
+        "type": "docker",
+        "build_context": "../..",
+        "name": "collector-fluentd",
+        "docker_file": "src/collector-fluentd/Dockerfile",
+        "build_only": true,
+        "resources": { "limits": { "cpu": 1, "memory": 1024 } },
+        "deployments": [{
+            "type": "docker-registry",
+            "host": "quay.io/infrabox",
+            "repository": "collector-fluentd",
+            "username": "infrabox+infrabox_ci",
+            "password": { "$secret": "QUAY_PASSWORD" }
+        }],
+        "cache": {
+            "image": true
+        }
     }, {
         "type": "docker",
         "build_context": "../..",
@@ -188,23 +221,6 @@
         "cache": {
             "image": true
         }
-    }, {
-        "type": "docker",
-        "build_context": "../..",
-        "name": "job-git",
-        "docker_file": "src/job/git/Dockerfile",
-        "build_only": true,
-        "resources": { "limits": { "cpu": 1, "memory": 1024 } },
-        "deployments": [{
-            "type": "docker-registry",
-            "host": "quay.io/infrabox",
-            "repository": "job-git",
-            "username": "infrabox+infrabox_ci",
-            "password": { "$secret": "QUAY_PASSWORD" }
-        }],
-        "cache": {
-            "image": true
-        }
     }, {
         "type": "docker",
         "build_context": "../..",
diff --git a/infrabox/generator/e2e.json b/infrabox/generator/e2e.json
index 796ff5da3..6d6267929 100644
--- a/infrabox/generator/e2e.json
+++ b/infrabox/generator/e2e.json
@@ -20,8 +20,8 @@
         },
         "spec": {
             "machineType": "n1-standard-4",
-            "numNodes": "2",
-            "preemptible": "false"
+            "numNodes": 2,
+            "preemptible": false
         }
     }]
 }]
diff --git a/infrabox/test/api/job_api_test.py b/infrabox/test/api/job_api_test.py
index 16329eb69..ba3014212 100644
--- a/infrabox/test/api/job_api_test.py
+++ b/infrabox/test/api/job_api_test.py
@@ -261,17 +261,3 @@ def test_testresult(self):
             row_dictionary = dict(zip(keys, received_row))
             self.assertTrue(all(item in testresult_data["tests"][i].items()
                                 for item in row_dictionary.items()))
-
-    def test_setfinished(self):
-        data = {
-            "state": "finished",
-            "message": "Job successfully finished"
-        }
-        r = TestClient.post(self.url_ns + '/setfinished', data, self.job_headers)
-        self.assertEqual(r, {})
-
-        r = TestClient.execute_one("""SELECT state, message, console FROM job
-                                      WHERE id = %s""", [self.job_id])
-        self.assertEqual(r["state"], data["state"])
-        self.assertEqual(r["message"], data["message"])
-        self.assertEqual(r["console"], "")
diff --git a/infrabox/test/api/test_template.py b/infrabox/test/api/test_template.py
index 55d96e1c0..c03673dd5 100644
--- a/infrabox/test/api/test_template.py
+++ b/infrabox/test/api/test_template.py
@@ -15,7 +15,6 @@ def setUp(self):
         TestClient.execute('TRUNCATE build')
         TestClient.execute('TRUNCATE console')
         TestClient.execute('TRUNCATE job')
-        TestClient.execute('TRUNCATE job_stat')
         TestClient.execute('TRUNCATE job_markup')
         TestClient.execute('TRUNCATE job_badge')
         TestClient.execute('TRUNCATE source_upload')
diff --git a/infrabox/test/e2e/entrypoint.sh b/infrabox/test/e2e/entrypoint.sh
index e4fc8344f..286b98584 100755
--- a/infrabox/test/e2e/entrypoint.sh
+++ b/infrabox/test/e2e/entrypoint.sh
@@ -192,6 +192,12 @@ _installInfrabox() {
     helm install --namespace infrabox-system .
     popd
 
+    sleep 5
+
+    pushd $outdir/infrabox-function
+    helm install --namespace infrabox-system .
+    popd
+
     export INFRABOX_DATABASE_HOST=localhost
     export INFRABOX_DATABASE_DB=postgres
     export INFRABOX_DATABASE_USER=postgres
diff --git a/infrabox/test/e2e/test.py b/infrabox/test/e2e/test.py
index 8cd3a2b81..b13f017b3 100644
--- a/infrabox/test/e2e/test.py
+++ b/infrabox/test/e2e/test.py
@@ -31,7 +31,6 @@ def setUp(self):
         cur.execute('''DELETE FROM source_upload''')
         cur.execute('''DELETE FROM build''')
         cur.execute('''DELETE FROM test_run''')
-        cur.execute('''DELETE FROM job_stat''')
         cur.execute('''DELETE FROM measurement''')
         cur.execute('''DELETE FROM test''')
         cur.execute('''DELETE FROM job_markup''')
diff --git a/src/api/Dockerfile b/src/api/Dockerfile
index 4051c0707..a1e8536f2 100644
--- a/src/api/Dockerfile
+++ b/src/api/Dockerfile
@@ -1,9 +1,10 @@
-FROM debian:9.3
+FROM debian:9.4-slim
 
-RUN apt-get update -y && apt-get install -y python python-psycopg2 python-requests python-pip python-flask python-ldap && \
-    pip install PyJWT jsonschema cryptography flask_restplus eventlet flask_socketio boto3 google-cloud-storage future bcrypt \
+RUN apt-get update -y && apt-get install -y python python-psycopg2 python-pip python-flask python-ldap && \
+    pip install requests==2.18.4 PyJWT jsonschema cryptography flask_restplus eventlet flask_socketio boto3 google-cloud-storage future bcrypt pycrypto \
     azure-mgmt-resource azure-storage && \
-    apt-get remove -y python-pip
+    apt-get remove -y python-pip && \
+    rm -rf /var/lib/apt/lists/*
 
 ENV PYTHONPATH=/
 
diff --git a/src/api/handlers/job_api.py b/src/api/handlers/job_api.py
index a727aa4a2..e9450486f 100644
--- a/src/api/handlers/job_api.py
+++ b/src/api/handlers/job_api.py
@@ -491,7 +491,7 @@ def post(self):
 
         g.db.commit()
 
-        return jsonify({})
+        return jsonify({"message": "File uploaded"})
 
 
 # TODO(steffen): check upload output sizes
@@ -885,8 +885,6 @@ def post(self):
         if 'labels' not in s['metadata']:
             s['metadata']['labels'] = {}
 
-        s['metadata']['labels']['service.infrabox.net/id'] = str(uuid.uuid4())
-
         # Create job
         g.db.execute("""
             INSERT INTO job (id, state, build_id, type, dockerfile, name,
@@ -1126,11 +1124,6 @@ def _remove_file(response):
         except ValidationError as e:
             abort(400, e.message)
 
-        testruns = g.db.execute_one("SELECT COUNT(*) as cnt FROM test_run WHERE job_id = %s", [job_id])
-
-        if testruns[0] > 0:
-            abort(400, "testrun already created")
-
         rows = g.db.execute_one("""
             SELECT j.project_id, b.build_number
             FROM job j
@@ -1152,15 +1145,6 @@ def _remove_file(response):
         test_runs = []
         measurements = []
 
-        stats = {
-            "tests_added": 0,
-            "tests_duration": 0,
-            "tests_skipped": 0,
-            "tests_failed": 0,
-            "tests_error": 0,
-            "tests_passed": 0,
-        }
-
         tests = data['tests']
 
         for t in tests:
@@ -1186,20 +1170,10 @@ def _remove_file(response):
                     test_id,
                     build_number
                 ))
-                stats['tests_added'] += 1
 
            # Track stats
            if t['status'] == 'fail' or t['status'] == 'failure':
                t['status'] = 'failure'
-                stats['tests_failed'] += 1
-            elif t['status'] == 'ok':
-                stats['tests_passed'] += 1
-            elif t['status'] == 'skipped':
-                stats['tests_skipped'] += 1
-            elif t['status'] == 'error':
-                stats['tests_error'] += 1
-
-            stats['tests_duration'] += t['duration']
 
            # Create the corresponding test run
            test_run_id = str(uuid.uuid4())
@@ -1233,44 +1207,5 @@ def _remove_file(response):
         insert(g.db.conn, ("id", "state", "job_id", "test_id", "duration",
                            "project_id", "message", "stack"), test_runs, 'test_run')
 
-        insert(g.db.conn, ("tests_added", "tests_duration", "tests_skipped", "tests_failed", "tests_error",
-                           "tests_passed", "job_id", "project_id"),
-               ((stats['tests_added'], stats['tests_duration'], stats['tests_skipped'],
-                 stats['tests_failed'], stats['tests_error'], stats['tests_passed'],
-                 job_id, project_id),), 'job_stat')
-
-        g.db.commit()
-        return jsonify({})
-
-@ns.route("/setfinished")
-class SetFinished(Resource):
-
-    @job_token_required
-    def post(self):
-        job_id = g.token['job']['id']
-
-        state = request.json['state']
-        message = request.json.get('message', None)
-
-        # collect console output
-        lines = g.db.execute_many("""SELECT output FROM console WHERE job_id = %s
-                                     ORDER BY date""", [job_id])
-
-        output = ""
-        for l in lines:
-            output += l[0]
-
-        # Update state
-        g.db.execute("""
-            UPDATE job SET
-                state = %s,
-                console = %s,
-                end_date = current_timestamp,
-                message = %s
-            WHERE id = %s""", [state, output, message, job_id])
-
-        # remove form console table
-        g.db.execute("DELETE FROM console WHERE job_id = %s", [job_id])
-
         g.db.commit()
         return jsonify({})
diff --git a/src/collector-api/Dockerfile b/src/collector-api/Dockerfile
new file mode 100644
index 000000000..383d8e84b
--- /dev/null
+++ b/src/collector-api/Dockerfile
@@ -0,0 +1,15 @@
+FROM debian:9.4-slim
+
+RUN apt-get update -y && apt-get install -y python python-pip python-flask && \
+    pip install flask_restplus eventlet && \
+    apt-get remove -y python-pip && \
+    apt-get autoremove -y && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV PYTHONPATH=/
+
+COPY src/collector-api /collector
+COPY src/pyinfraboxutils /pyinfraboxutils
+COPY src/pyinfrabox /pyinfrabox
+
+CMD python /collector/server.py
diff --git a/src/collector-api/__init__.py b/src/collector-api/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/collector-api/server.py b/src/collector-api/server.py
new file mode 100644
index 000000000..88c12900d
--- /dev/null
+++ b/src/collector-api/server.py
@@ -0,0 +1,121 @@
+#pylint: disable=unused-import,relative-import,wrong-import-position
+import uuid
+import os
+import sys
+import json
+
+from flask import Flask, request, send_from_directory, abort, Response
+from flask_restplus import Resource, Api
+
+import eventlet
+eventlet.monkey_patch()
+
+from pyinfraboxutils import get_env, print_stackdriver, get_logger
+
+logger = get_logger('api')
+
+storage_path = '/tmp/collector/'
+
+app = Flask(__name__)
+api = Api(app)
+
+@api.route('/ping')
+class Ping(Resource):
+    def get(self):
+        return {'status': 200}
+
+def handle_entry(entry):
+    e = entry['kubernetes']
+    pod_path = os.path.join(storage_path, e['pod_id'])
+
+    if not os.path.exists(pod_path):
+        os.makedirs(pod_path)
+
+    metadata_path = os.path.join(pod_path, "metadata.json")
+    log_path = os.path.join(pod_path, e['container_name'] + ".log")
+
+
+    if not os.path.exists(metadata_path):
+        with open(metadata_path, 'w+') as metadata_file:
+            md = {
+                'namespace_id': e['namespace_id'],
+                'namespace_name': e['namespace_name'],
+                'pod_id': e['pod_id'],
+                'pod_name': e['pod_name'],
+                'containers': []
+            }
+            json.dump(md, metadata_file)
+
+    if not os.path.exists(log_path):
+        # this is the first log entry we receive, so also register it in the metadata
+        with open(metadata_path, 'r') as metadata_file:
+            md = json.load(metadata_file)
+            md['containers'].append(e['container_name'])
+
+        with open(metadata_path, 'w') as metadata_file:
+            json.dump(md, metadata_file)
+
+    if 'log' in entry:
+        with open(log_path, 'a+') as log_file:
+            log_file.write(entry['log'])
+
+@api.route('/api/log')
+class Console(Resource):
+    def post(self):
+        entries = request.get_json()
+
+        for e in entries:
+            handle_entry(e)
+
+        return {'status': 200}
+
+@api.route('/api/pods')
+class Pods(Resource):
+    def get(self):
+        pods = []
+        for root, dirs, _ in os.walk(storage_path):
+            for name in dirs:
+                p = os.path.join(root, name, 'metadata.json')
+                with open(p) as f:
+                    pods.append(json.load(f))
+
+        return pods
+
+@api.route('/api/pods/<pod_id>')
+class Pod(Resource):
+    def get(self, pod_id):
+        p = os.path.join(os.path.join(storage_path), pod_id, 'metadata.json')
+
+        if not p.startswith(storage_path):
+            abort(404)
+
+        if not os.path.exists(p):
+            abort(404)
+
+        with open(p) as f:
+            return json.load(f)
+
+@api.route('/api/pods/<pod_id>/log/<container_name>')
+class PodLog(Resource):
+    def get(self, pod_id, container_name):
+        p = os.path.join(os.path.join(storage_path), pod_id, container_name + '.log')
+
+        if not p.startswith(storage_path):
+            abort(404)
+
+        if not os.path.exists(p):
+            abort(404)
+
+        with open(p) as f:
+            d = f.read()
+            return Response(d, mimetype='text/plain')
+
+def main(): # pragma: no cover
+    app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 1024 * 4
+
+    port = int(os.environ.get('INFRABOX_PORT', 8080))
+    logger.info('Starting Server on port %s', port)
+    app.run(host='0.0.0.0', port=port)
+
+if __name__ == "__main__": # pragma: no cover
+    main()
diff --git a/src/collector-fluentd/Dockerfile b/src/collector-fluentd/Dockerfile
new file mode 100644
index 000000000..0342f2f66
--- /dev/null
+++ b/src/collector-fluentd/Dockerfile
@@ -0,0 +1,34 @@
+FROM fluent/fluentd:v0.12.33
+
+USER root
+WORKDIR /home/fluent
+
+ENV PATH /fluentd/vendor/bundle/ruby/2.3.0/bin:$PATH
+ENV GEM_PATH /fluentd/vendor/bundle/ruby/2.3.0
+ENV GEM_HOME /fluentd/vendor/bundle/ruby/2.3.0
+ENV FLUENTD_DISABLE_BUNDLER_INJECTION 1
+
+COPY src/collector-fluentd/Gemfile* /fluentd/
+RUN set -ex \
+    && apk upgrade --no-cache \
+    && apk add --no-cache ruby-bundler \
+    && apk add --no-cache --virtual .build-deps \
+        build-base \
+        ruby-dev \
+        libffi-dev \
+    && gem install bundler --version 1.16.1 \
+    && bundle config silence_root_warning true \
+    && bundle install --gemfile=/fluentd/Gemfile --path=/fluentd/vendor/bundle \
+    && apk del .build-deps \
+    && gem sources --clear-all \
+    && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem
+
+COPY src/collector-fluentd/conf/fluent.conf /fluentd/etc/
+COPY src/collector-fluentd/plugins /fluentd/plugins/
+COPY src/collector-fluentd/entrypoint.sh /fluentd/entrypoint.sh
+
+ENV FLUENTD_OPT=""
+ENV FLUENTD_CONF="fluent.conf"
+
+# Run Fluentd
+CMD ["/fluentd/entrypoint.sh"]
diff --git a/src/collector-fluentd/Gemfile b/src/collector-fluentd/Gemfile
new file mode 100644
index 000000000..3d7a653cb
--- /dev/null
+++ b/src/collector-fluentd/Gemfile
@@ -0,0 +1,11 @@
+# AUTOMATICALLY GENERATED
+# DO NOT EDIT THIS FILE DIRECTLY, USE /templates/Gemfile.erb
+
+source "https://rubygems.org"
+
+gem "fluentd", "0.12.33"
+gem "oj", "3.5.1"
+gem "fluent-plugin-secure-forward"
+gem "fluent-plugin-record-reformer"
+gem "fluent-plugin-gcs", "~>0.3"
"fluent-plugin-kubernetes_metadata_filter" diff --git a/src/collector-fluentd/Gemfile.lock b/src/collector-fluentd/Gemfile.lock new file mode 100644 index 000000000..f12a60863 --- /dev/null +++ b/src/collector-fluentd/Gemfile.lock @@ -0,0 +1,140 @@ +GEM + remote: https://rubygems.org/ + specs: + activesupport (5.2.0) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 0.7, < 2) + minitest (~> 5.1) + tzinfo (~> 1.1) + addressable (2.5.2) + public_suffix (>= 2.0.2, < 4.0) + concurrent-ruby (1.0.5) + cool.io (1.5.3) + digest-crc (0.4.1) + domain_name (0.5.20180417) + unf (>= 0.0.5, < 1.0.0) + faraday (0.15.0) + multipart-post (>= 1.2, < 3) + fluent-plugin-gcs (0.3.0) + fluentd (~> 0.12.0) + google-cloud-storage (~> 0.23.2) + fluent-plugin-kubernetes_metadata_filter (1.0.2) + fluentd (>= 0.12.0) + kubeclient (~> 1.1.4) + lru_redux + fluent-plugin-record-reformer (0.9.1) + fluentd + fluent-plugin-secure-forward (0.4.5) + fluentd (>= 0.10.46) + proxifier + resolve-hostname + fluentd (0.12.33) + cool.io (>= 1.2.2, < 2.0.0) + http_parser.rb (>= 0.5.1, < 0.7.0) + json (>= 1.4.3) + msgpack (>= 0.5.11, < 2) + sigdump (~> 0.2.2) + string-scrub (>= 0.0.3, <= 0.0.5) + tzinfo (>= 1.0.0) + tzinfo-data (>= 1.0.0) + yajl-ruby (~> 1.0) + google-api-client (0.9.28) + addressable (~> 2.3) + googleauth (~> 0.5) + httpclient (~> 2.7) + hurley (~> 0.1) + memoist (~> 0.11) + mime-types (>= 1.6) + representable (~> 2.3.0) + retriable (~> 2.0) + google-cloud-core (0.21.0) + google-cloud-storage (0.23.2) + digest-crc (~> 0.4) + google-api-client (~> 0.9.11) + google-cloud-core (~> 0.21.0) + googleauth (0.6.2) + faraday (~> 0.12) + jwt (>= 1.4, < 3.0) + logging (~> 2.0) + memoist (~> 0.12) + multi_json (~> 1.11) + os (~> 0.9) + signet (~> 0.7) + http (0.9.8) + addressable (~> 2.3) + http-cookie (~> 1.0) + http-form_data (~> 1.0.1) + http_parser.rb (~> 0.6.0) + http-cookie (1.0.3) + domain_name (~> 0.5) + http-form_data (1.0.3) + http_parser.rb (0.6.0) + httpclient (2.8.3) + hurley (0.2) + i18n (1.0.1) + concurrent-ruby (~> 1.0) + json (2.1.0) + jwt (2.1.0) + kubeclient (1.1.4) + activesupport + http (= 0.9.8) + recursive-open-struct (= 1.0.0) + rest-client + little-plugger (1.1.4) + logging (2.2.2) + little-plugger (~> 1.1) + multi_json (~> 1.10) + lru_redux (1.1.0) + memoist (0.16.0) + mime-types (3.1) + mime-types-data (~> 3.2015) + mime-types-data (3.2016.0521) + minitest (5.11.3) + msgpack (1.2.4) + multi_json (1.13.1) + multipart-post (2.0.0) + netrc (0.11.0) + oj (3.5.1) + os (0.9.6) + proxifier (1.0.3) + public_suffix (3.0.2) + recursive-open-struct (1.0.0) + representable (2.3.0) + uber (~> 0.0.7) + resolve-hostname (0.1.0) + rest-client (2.0.2) + http-cookie (>= 1.0.2, < 2.0) + mime-types (>= 1.16, < 4.0) + netrc (~> 0.8) + retriable (2.1.0) + sigdump (0.2.4) + signet (0.8.1) + addressable (~> 2.3) + faraday (~> 0.9) + jwt (>= 1.5, < 3.0) + multi_json (~> 1.10) + string-scrub (0.0.5) + thread_safe (0.3.6) + tzinfo (1.2.5) + thread_safe (~> 0.1) + tzinfo-data (1.2018.4) + tzinfo (>= 1.0.0) + uber (0.0.15) + unf (0.1.4) + unf_ext + unf_ext (0.0.7.5) + yajl-ruby (1.4.0) + +PLATFORMS + ruby + +DEPENDENCIES + fluent-plugin-gcs (~> 0.3) + fluent-plugin-kubernetes_metadata_filter + fluent-plugin-record-reformer + fluent-plugin-secure-forward + fluentd (= 0.12.33) + oj (= 3.5.1) + +BUNDLED WITH + 1.16.1 diff --git a/src/collector-fluentd/conf/fluent.conf b/src/collector-fluentd/conf/fluent.conf new file mode 100644 index 000000000..3c80e3ab7 --- /dev/null +++ b/src/collector-fluentd/conf/fluent.conf @@ -0,0 +1,32 @@ + + 
+  @type null
+</match>
+<source>
+  @type tail
+  path /var/log/containers/*.log
+  pos_file /var/log/fluentd-containers.log.pos
+  time_format %Y-%m-%dT%H:%M:%S.%NZ
+  tag kubernetes.*
+  format json
+  read_from_head true
+</source>
+<filter kubernetes.**>
+  @type kubernetes_metadata
+</filter>
+
+  @type null
+</match>
+
+  @type null
+</match>
+
+  @type null
+</match>
+<match **>
+  @type http_buffered
+  flush_interval 1s
+  endpoint_url "#{ENV['INFRABOX_COLLECTOR_ENDPOINT']}"
+  http_retry_statuses 500, 403
+  http_read_timeout 5.0
+  http_open_timeout 5.0
+</match>
diff --git a/src/collector-fluentd/entrypoint.sh b/src/collector-fluentd/entrypoint.sh
new file mode 100755
index 000000000..64a4005e4
--- /dev/null
+++ b/src/collector-fluentd/entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+if [ -z ${FLUENT_ELASTICSEARCH_USER} ] ; then
+    sed -i '/FLUENT_ELASTICSEARCH_USER/d' /fluentd/etc/${FLUENTD_CONF}
+fi
+
+if [ -z ${FLUENT_ELASTICSEARCH_PASSWORD} ] ; then
+    sed -i '/FLUENT_ELASTICSEARCH_PASSWORD/d' /fluentd/etc/${FLUENTD_CONF}
+fi
+
+exec fluentd -c /fluentd/etc/${FLUENTD_CONF} -p /fluentd/plugins --gemfile /fluentd/Gemfile ${FLUENTD_OPT}
diff --git a/src/collector-fluentd/plugins/LICENSE.txt b/src/collector-fluentd/plugins/LICENSE.txt
new file mode 100644
index 000000000..587d56b5d
--- /dev/null
+++ b/src/collector-fluentd/plugins/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2013 ablagoev
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/collector-fluentd/plugins/README.rdoc b/src/collector-fluentd/plugins/README.rdoc
new file mode 100644
index 000000000..a6800a3fb
--- /dev/null
+++ b/src/collector-fluentd/plugins/README.rdoc
@@ -0,0 +1,37 @@
+= fluent-out-http-buffered
+
+This is an output plugin for Fluentd[http://fluentd.org/] which delivers buffered log messages to an http endpoint.
+
+It has configurable read[http://ruby-doc.org/stdlib-2.0/libdoc/net/http/rdoc/Net/HTTP.html#method-i-read_timeout-3D] and open[http://ruby-doc.org/stdlib-2.0/libdoc/net/http/rdoc/Net/HTTP.html#open_timeout] timeouts.
+
+Clients can also configure which http response statuses should be retried (in most cases clients would want to retry on status 500).
+
+All messages are sent through POST in json format.
+
+The plugin was influenced by the standard http_output_plugin[https://github.com/ento/fluent-plugin-out-http].
+
+== Installation:
+
+`gem install fluent-out-http-buffered`
+
+== Usage:
+
+  # Configuration file fluent.conf
+  <match **>
+    type http_buffered
+    flush_interval 2s
+    #Endpoint for messages
+    endpoint_url http://localhost/fluent.php
+    #Comma separated list of http statuses which need to be retried
+    http_retry_statuses 500, 403
+    #Read timeout in seconds, supports floats
+    http_read_timeout 2.2
+    #Open timeout in seconds, supports floats
+    http_open_timeout 2.34
+  </match>
+
+== Copyright
+
+Copyright (c) 2013 ablagoev. See LICENSE.txt for
+further details.
+
diff --git a/src/collector-fluentd/plugins/out_http_buffered.rb b/src/collector-fluentd/plugins/out_http_buffered.rb
new file mode 100644
index 000000000..13f109cc4
--- /dev/null
+++ b/src/collector-fluentd/plugins/out_http_buffered.rb
@@ -0,0 +1,110 @@
+# encoding: utf-8
+
+module Fluent
+  # Main Output plugin class
+  class HttpBufferedOutput < Fluent::BufferedOutput
+    Fluent::Plugin.register_output('http_buffered', self)
+
+    def initialize
+      super
+      require 'net/http'
+      require 'uri'
+    end
+
+    # Endpoint URL ex. localhost.local/api/
+    config_param :endpoint_url, :string
+
+    # statuses under which to retry
+    config_param :http_retry_statuses, :string, default: ''
+
+    # read timeout for the http call
+    config_param :http_read_timeout, :float, default: 2.0
+
+    # open timeout for the http call
+    config_param :http_open_timeout, :float, default: 2.0
+
+    def configure(conf)
+      super
+
+      # Check if endpoint URL is valid
+      unless @endpoint_url =~ /^#{URI.regexp}$/
+        fail Fluent::ConfigError, 'endpoint_url invalid'
+      end
+
+      begin
+        @uri = URI.parse(@endpoint_url)
+      rescue URI::InvalidURIError
+        raise Fluent::ConfigError, 'endpoint_url invalid'
+      end
+
+      # Parse http statuses
+      @statuses = @http_retry_statuses.split(',').map { |status| status.to_i }
+
+      @statuses = [] if @statuses.nil?
+
+      @http = Net::HTTP.new(@uri.host, @uri.port)
+      @http.read_timeout = @http_read_timeout
+      @http.open_timeout = @http_open_timeout
+    end
+
+    def start
+      super
+    end
+
+    def shutdown
+      super
+      begin
+        @http.finish
+      rescue
+      end
+    end
+
+    def format(tag, time, record)
+      [tag, time, record].to_msgpack
+    end
+
+    def write(chunk)
+      data = []
+      chunk.msgpack_each do |(tag, time, record)|
+        data << record
+      end
+
+      request = create_request(data)
+
+      begin
+        response = @http.start do |http|
+          request = create_request(data)
+          http.request request
+        end
+
+        if @statuses.include? response.code.to_i
+          # Raise an exception so that fluent retries
+          fail "Server returned bad status: #{response.code}"
+        end
+      rescue IOError, EOFError, SystemCallError => e
+        # server didn't respond
+        $log.warn "Net::HTTP.#{request.method.capitalize} raises exception: #{e.class}, '#{e.message}'"
+      ensure
+        begin
+          @http.finish
+        rescue
+        end
+      end
+    end
+
+    protected
+
+    def create_request(data)
+      request = Net::HTTP::Post.new(@uri.request_uri)
+
+      # Headers
+      request['Content-Type'] = 'application/json'
+
+      # Body
+      request.body = JSON.dump(data)
+
+      request
+    end
+  end
+end
diff --git a/src/controller/Dockerfile b/src/controller/Dockerfile
index 1f4ca3ee6..ff5f830f5 100644
--- a/src/controller/Dockerfile
+++ b/src/controller/Dockerfile
@@ -1,18 +1,17 @@
 FROM golang:1.10-alpine AS build-env
-RUN apk add --no-cache git bash curl
-RUN curl https://glide.sh/get | sh
+RUN apk add --no-cache git curl bash
+RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
 
 COPY . /go/src/github.com/sap/infrabox/
 
 WORKDIR /go/src/github.com/sap/infrabox/src/controller
-RUN glide install
-RUN ./hack/update-codegen.sh
-RUN go build
+RUN dep ensure
+RUN ./tmp/build/build.sh
 
-FROM alpine:3.6
+FROM alpine:3.7
 
 WORKDIR /app
 
-COPY --from=build-env /go/src/github.com/sap/infrabox/src/controller /app/controller
+COPY --from=build-env /go/src/github.com/sap/infrabox/src/controller/tmp/_output/bin/controller /app/controller
 
-ENTRYPOINT ./controller/controller -logtostderr
+ENTRYPOINT ./controller
diff --git a/src/controller/Gopkg.lock b/src/controller/Gopkg.lock
new file mode 100644
index 000000000..f6c2456c8
--- /dev/null
+++ b/src/controller/Gopkg.lock
@@ -0,0 +1,432 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/PuerkitoBio/purell"
+  packages = ["."]
+  revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/PuerkitoBio/urlesc"
+  packages = ["."]
+  revision = "de5bf2ad457846296e2031421a34e2568e304e35"
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/emicklei/go-restful"
+  packages = [
+    ".",
+    "log"
+  ]
+  revision = "2810ccc68e0ca445fa81ebfa03fbf70aca5c41ae"
+  version = "v2.7.0"
+
+[[projects]]
+  name = "github.com/ghodss/yaml"
+  packages = ["."]
+  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/go-openapi/jsonpointer"
+  packages = ["."]
+  revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/go-openapi/jsonreference"
+  packages = ["."]
+  revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/go-openapi/spec"
+  packages = ["."]
+  revision = "bcff419492eeeb01f76e77d2ebc714dc97b607f5"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/go-openapi/swag"
+  packages = ["."]
+  revision = "811b1089cde9dad18d4d0c2d09fbdbf28dbd27a5"
+
+[[projects]]
+  name = "github.com/gogo/protobuf"
+  packages = [
+    "proto",
+    "sortkeys"
+  ]
+  revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/glog"
+  packages = ["."]
+  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
+
+[[projects]]
+  name = "github.com/golang/protobuf"
+  packages = [
+    "proto",
+    "ptypes",
+    "ptypes/any",
+    "ptypes/duration",
+    "ptypes/timestamp"
+  ]
+  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/btree"
+  packages = ["."]
+  revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/gofuzz"
+  packages = ["."]
+  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
+
+[[projects]]
+  name = "github.com/googleapis/gnostic"
+  packages = [
+    "OpenAPIv2",
+    "compiler",
+    "extensions"
+  ]
+  revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
+  version = "v0.2.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/gregjones/httpcache"
+  packages = [
+    ".",
+    "diskcache"
+  ]
+  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/hashicorp/golang-lru"
+  packages = [
+    ".",
+    "simplelru"
+  ]
"0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + +[[projects]] + branch = "master" + name = "github.com/howeyc/gopass" + packages = ["."] + revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c" + version = "v0.3.4" + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" + version = "1.1.3" + +[[projects]] + name = "github.com/juju/ratelimit" + packages = ["."] + revision = "59fac5042749a5afb9af70e813da1dd5474f0167" + version = "1.0.1" + +[[projects]] + branch = "master" + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter" + ] + revision = "8b799c424f57fa123fc63a99d6383bc6e4c02578" + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" + version = "1.0.0" + +[[projects]] + branch = "master" + name = "github.com/onrik/logrus" + packages = ["filename"] + revision = "9db917e87735094e75f37662738b3bebb96eedaa" + +[[projects]] + branch = "master" + name = "github.com/operator-framework/operator-sdk" + packages = [ + "pkg/k8sclient", + "pkg/sdk", + "pkg/util/k8sutil", + "version" + ] + revision = "8a1704d319afaf123b67ab0b80ecb6e3c689328f" + +[[projects]] + branch = "master" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + name = "github.com/peterbourgon/diskv" + packages = ["."] + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + name = "github.com/sirupsen/logrus" + packages = ["."] + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + revision = "1a580b3eff7814fc9b40602fd35256c63b50f491" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna" + ] + revision = "8e0cdda24ed423affc8f35c241e5e9b16180338e" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows" + ] + revision = "7f59abf37be6a6007f075af1bc7f16f137bc176b" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[[projects]] + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + 
"authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1" + ] + revision = "acf347b865f29325eb61f4cd2df11e86e073a5ee" + version = "kubernetes-1.9.3" + +[[projects]] + branch = "master" + name = "k8s.io/apiextensions-apiserver" + packages = ["pkg/apis/apiextensions"] + revision = "bd76ce7dd8e65e8c2197803a1e64869ca5905309" + +[[projects]] + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1alpha1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/reflect" + ] + revision = "19e3f5aa3adca672c153d324e6b7d82ff8935f03" + version = "kubernetes-1.9.3" + +[[projects]] + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/cached", + "dynamic", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/version", + "rest", + "rest/watch", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/pager", + "tools/reference", + "transport", + "util/buffer", + "util/cert", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/workqueue" + ] + revision = "9389c055a838d4f208b699b3c7c51b70f2368861" + version = "kubernetes-1.9.3" + +[[projects]] + branch = "master" + name = "k8s.io/kube-openapi" + packages = ["pkg/common"] + revision = 
"67793630244cabba12e91ce1ec8d0d0b40802b56" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "687da0384571fe0a9ca4a8803ad398df033556d6cca323605f869936f63c021b" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/src/controller/Gopkg.toml b/src/controller/Gopkg.toml new file mode 100644 index 000000000..a78bcc8de --- /dev/null +++ b/src/controller/Gopkg.toml @@ -0,0 +1,17 @@ +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.9.3" + +[[override]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.9.3" + +[[override]] + name = "k8s.io/client-go" + version = "kubernetes-1.9.3" + +[[constraint]] + name = "github.com/operator-framework/operator-sdk" + # The version rule is used for a specific release and the master branch for in between releases. + branch = "master" + # version = "=v0.0.5" diff --git a/src/controller/cmd/controller/main.go b/src/controller/cmd/controller/main.go new file mode 100644 index 000000000..658ab52c8 --- /dev/null +++ b/src/controller/cmd/controller/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "runtime" + + stub "github.com/sap/infrabox/src/controller/pkg/stub" + sdk "github.com/operator-framework/operator-sdk/pkg/sdk" + k8sutil "github.com/operator-framework/operator-sdk/pkg/util/k8sutil" + sdkVersion "github.com/operator-framework/operator-sdk/version" + + "github.com/sirupsen/logrus" +) + +func printVersion() { + logrus.Infof("Go Version: %s", runtime.Version()) + logrus.Infof("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH) + logrus.Infof("operator-sdk Version: %v", sdkVersion.Version) +} + +func main() { + printVersion() + + resource := "core.infrabox.net/v1alpha1" + namespace, err := k8sutil.GetWatchNamespace() + if err != nil { + logrus.Fatalf("Failed to get watch namespace: %v", err) + } + resyncPeriod := 5 + //sdk.Watch(resource, "Workflow", namespace, resyncPeriod) + sdk.Watch(resource, "IBPipelineInvocation", namespace, resyncPeriod) + sdk.Watch(resource, "IBFunctionInvocation", namespace, resyncPeriod) + sdk.Handle(stub.NewHandler()) + sdk.Run(context.TODO()) +} diff --git a/src/controller/config/config.yaml b/src/controller/config/config.yaml new file mode 100644 index 000000000..10b90b7ac --- /dev/null +++ b/src/controller/config/config.yaml @@ -0,0 +1,3 @@ +apiVersion: core.infrabox.net/v1alpha1 +kind: FunctionInvocation +projectName: operator diff --git a/src/controller/controller.go b/src/controller/controller.go deleted file mode 100644 index b14f26733..000000000 --- a/src/controller/controller.go +++ /dev/null @@ -1,968 +0,0 @@ -package main - -import ( - "crypto/rsa" - "encoding/json" - "fmt" - jwt "github.com/dgrijalva/jwt-go" - "io/ioutil" - "os" - "strconv" - "time" - - goerr "errors" - - "github.com/golang/glog" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - batchlisters "k8s.io/client-go/listers/batch/v1" - podlisters "k8s.io/client-go/listers/core/v1" - rest "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - 
"k8s.io/client-go/util/workqueue" - - restclient "k8s.io/client-go/rest" - - jobv1alpha1 "github.com/sap/infrabox/src/controller/pkg/apis/infrabox-controller/v1alpha1" - clientset "github.com/sap/infrabox/src/controller/pkg/client/clientset/versioned" - jobscheme "github.com/sap/infrabox/src/controller/pkg/client/clientset/versioned/scheme" - informers "github.com/sap/infrabox/src/controller/pkg/client/informers/externalversions" - listers "github.com/sap/infrabox/src/controller/pkg/client/listers/infrabox-controller/v1alpha1" -) - -const controllerAgentName = "infrabox-controller" - -type Controller struct { - kubeclientset kubernetes.Interface - jobclientset clientset.Interface - jobLister listers.IBJobLister - jobSynced cache.InformerSynced - k8sJobLister batchlisters.JobLister - k8sJobSynced cache.InformerSynced - podLister podlisters.PodLister - podSynced cache.InformerSynced - workqueue workqueue.RateLimitingInterface - recorder record.EventRecorder - config *restclient.Config - generalDontCheckCertificates string - localCacheEnabled string - jobMaxOutputSize string - jobMountdockerSocket string - daemonJSON string - rootURL string - tag string - dockerRegistry string - localCacheHostPath string - gerritEnabled string - gerritUsername string - gerritHostname string - gerritPort string -} - -// NewController returns a new job controller -func NewController( - kubeclientset kubernetes.Interface, - jobclientset clientset.Interface, - kubeInformerFactory kubeinformers.SharedInformerFactory, - jobInformerFactory informers.SharedInformerFactory, - config *restclient.Config) *Controller { - - jobInformer := jobInformerFactory.Core().V1alpha1().IBJobs() - k8sJobInformer := kubeInformerFactory.Batch().V1().Jobs() - podJobInformer := kubeInformerFactory.Core().V1().Pods() - - // Create event broadcaster - // Add sample-controller types to the default Kubernetes Scheme so Events can be - // logged for sample-controller types. 
- jobscheme.AddToScheme(scheme.Scheme) - glog.V(4).Info("Creating event broadcaster") - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) - - data, err := ioutil.ReadFile("/etc/docker/daemon.json") - if err != nil { - panic(err) - } - - controller := &Controller{ - kubeclientset: kubeclientset, - jobclientset: jobclientset, - jobLister: jobInformer.Lister(), - jobSynced: jobInformer.Informer().HasSynced, - k8sJobLister: k8sJobInformer.Lister(), - k8sJobSynced: k8sJobInformer.Informer().HasSynced, - podLister: podJobInformer.Lister(), - podSynced: podJobInformer.Informer().HasSynced, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Jobs"), - recorder: recorder, - config: config, - generalDontCheckCertificates: os.Getenv("INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES"), - localCacheEnabled: os.Getenv("INFRABOX_LOCAL_CACHE_ENABLED"), - jobMaxOutputSize: os.Getenv("INFRABOX_JOB_MAX_OUTPUT_SIZE"), - jobMountdockerSocket: os.Getenv("INFRABOX_JOB_MOUNT_DOCKER_SOCKET"), - daemonJSON: string(data), - rootURL: os.Getenv("INFRABOX_ROOT_URL"), - tag: os.Getenv("INFRABOX_VERSION"), - dockerRegistry: os.Getenv("INFRABOX_GENERAL_DOCKER_REGISTRY"), - localCacheHostPath: os.Getenv("INFRABOX_LOCAL_CACHE_HOST_PATH"), - gerritEnabled: os.Getenv("INFRABOX_GERRIT_ENABLED"), - } - - if controller.gerritEnabled == "true" { - controller.gerritHostname = os.Getenv("INFRABOX_GERRIT_HOSTNAME") - controller.gerritUsername = os.Getenv("INFRABOX_GERRIT_USERNAME") - controller.gerritPort = os.Getenv("INFRABOX_GERRIT_PORT") - } - - glog.Info("Setting up event handlers") - - jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.enqueueJob, - UpdateFunc: func(old, new interface{}) { - controller.enqueueJob(new) - }, - DeleteFunc: func(old interface{}) {}, - }) - - return controller -} - -func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - defer runtime.HandleCrash() - defer c.workqueue.ShutDown() - - glog.Info("Starting Cluster controller") - - glog.Info("Waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, c.jobSynced); !ok { - return fmt.Errorf("failed to wait for caches to sync") - } - - if ok := cache.WaitForCacheSync(stopCh, c.podSynced); !ok { - return fmt.Errorf("failed to wait for caches to sync") - } - - if ok := cache.WaitForCacheSync(stopCh, c.k8sJobSynced); !ok { - return fmt.Errorf("failed to wait for caches to sync") - } - - glog.Info("Starting workers") - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - glog.Info("Started workers") - <-stopCh - glog.Info("Shutting down workers") - - return nil -} - -func (c *Controller) runWorker() { - for c.processNextWorkItem() { - } -} - -func (c *Controller) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() - - if shutdown { - return false - } - - err := func(obj interface{}) error { - defer c.workqueue.Done(obj) - var key string - var ok bool - - if key, ok = obj.(string); !ok { - c.workqueue.Forget(obj) - runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) - return nil - } - - if err := c.syncHandler(key); err != nil { - return fmt.Errorf("%s: error syncing: %s", key, err.Error()) - } - - 
c.workqueue.Forget(obj) - return nil - }(obj) - - if err != nil { - runtime.HandleError(err) - return true - } - - return true -} - -func (c *Controller) syncHandler(key string) error { - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) - return nil - } - - job, err := c.jobLister.IBJobs(namespace).Get(name) - - if err != nil { - if errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("%s: Cluster in work queue no longer exists", key)) - return nil - } - return err - } - - err = c.syncHandlerImpl(*job.DeepCopy()) - - if err != nil { - job = job.DeepCopy() - job.Status.Status = "error" - job.Status.Message = err.Error() - _, err := c.jobclientset.CoreV1alpha1().IBJobs(job.Namespace).Update(job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s: Failed to update status", key)) - return err - } - } - - return nil -} - -func (c *Controller) newBatchJob(job *jobv1alpha1.IBJob, token string) *batchv1.Job { - volumes := []corev1.Volume{ - corev1.Volume{ - Name: "data-dir", - }, - corev1.Volume{ - Name: "repo", - }, - } - - volumeMounts := []corev1.VolumeMount{ - corev1.VolumeMount{ - MountPath: "/data", - Name: "data-dir", - }, - corev1.VolumeMount{ - MountPath: "/repo", - Name: "repo", - }, - } - - mem, _ := job.Spec.Resources.Limits.Memory().AsInt64() - mem = mem / 1024 / 1024 - - env := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "INFRABOX_JOB_ID", - Value: job.Name, - }, - corev1.EnvVar{ - Name: "INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES", - Value: c.generalDontCheckCertificates, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_API_URL", - Value: c.rootURL + "/api/job", - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_GIT_URL", - Value: "http://localhost:8080", - }, - corev1.EnvVar{ - Name: "INFRABOX_SERVICE", - Value: "job", - }, - corev1.EnvVar{ - Name: "INFRABOX_VERSION", - Value: c.tag, - }, - corev1.EnvVar{ - Name: "INFRABOX_LOCAL_CACHE_ENABLED", - Value: c.localCacheEnabled, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_MAX_OUTPUT_SIZE", - Value: c.jobMaxOutputSize, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_MOUNT_DOCKER_SOCKET", - Value: c.jobMountdockerSocket, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_DAEMON_JSON", - Value: c.daemonJSON, - }, - corev1.EnvVar{ - Name: "INFRABOX_ROOT_URL", - Value: c.rootURL, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_TOKEN", - Value: token, - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_RESOURCES_LIMITS_MEMORY", - Value: strconv.FormatInt(mem, 10), - }, - corev1.EnvVar{ - Name: "INFRABOX_JOB_RESOURCES_LIMITS_CPU", - Value: job.Spec.Resources.Limits.Cpu().String(), - }, - } - - env = append(env, job.Spec.Env...) 
- - if c.localCacheEnabled == "true" { - volumes = append(volumes, corev1.Volume{ - Name: "local-cache", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: c.localCacheHostPath, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - MountPath: "/local-cache", - Name: "local-cache", - }) - } - - cloneEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES", - Value: c.generalDontCheckCertificates, - }, - } - cloneVolumeMounts := []corev1.VolumeMount{ - corev1.VolumeMount{ - MountPath: "/repo", - Name: "repo", - }, - } - - if c.gerritEnabled == "true" { - gerritEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "INFRABOX_GERRIT_HOSTNAME", - Value: c.gerritHostname, - }, - corev1.EnvVar{ - Name: "INFRABOX_GERRIT_USERNAME", - Value: c.gerritUsername, - }, - corev1.EnvVar{ - Name: "INFRABOX_GERRIT_PORT", - Value: c.gerritPort, - }, - } - - env = append(env, gerritEnv...) - cloneEnv = append(env, gerritEnv...) - - cloneVolumeMounts = append(cloneVolumeMounts, corev1.VolumeMount{ - Name: "gerrit-ssh", - MountPath: "/tmp/gerrit/", - }) - - volumes = append(volumes, corev1.Volume{ - Name: "gerrit-ssh", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "infrabox-gerrit-ssh", - }, - }, - }) - } - - for _, s := range job.Spec.Services { - id, _ := s.Metadata.Labels["service.infrabox.net/id"] - - volumes = append(volumes, corev1.Volume{ - Name: id, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: id, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: id, - MountPath: "/var/run/infrabox.net/services/" + s.Metadata.Name, - }) - } - - t := true - f := false - - runJob := corev1.Container{ - Name: "run-job", - ImagePullPolicy: "Always", - Image: c.dockerRegistry + "/job:" + c.tag, - SecurityContext: &corev1.SecurityContext{ - Privileged: &t, - }, - Env: env, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "cpu": job.Spec.Resources.Limits.Cpu().DeepCopy(), - "memory": job.Spec.Resources.Limits.Memory().DeepCopy(), - }, - Limits: corev1.ResourceList{ - "cpu": job.Spec.Resources.Limits.Cpu().DeepCopy(), - }, - }, - VolumeMounts: volumeMounts, - } - - gitJob := corev1.Container{ - Name: "git-clone", - ImagePullPolicy: "Always", - Image: c.dockerRegistry + "/job-git:" + c.tag, - Env: cloneEnv, - VolumeMounts: cloneVolumeMounts, - } - - containers := []corev1.Container{ - gitJob, runJob, - } - - return &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: job.Name, - Namespace: job.Namespace, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(job, schema.GroupVersionKind{ - Group: jobv1alpha1.SchemeGroupVersion.Group, - Version: jobv1alpha1.SchemeGroupVersion.Version, - Kind: "Job", - }), - }, - Labels: map[string]string{ - "job.infrabox.net/id": job.Name, - }, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: &f, - Containers: containers, - Volumes: volumes, - RestartPolicy: "OnFailure", - }, - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "job.infrabox.net/id": job.Name, - }, - }, - }, - }, - } -} - -func (c *Controller) deleteBatchJob(job *jobv1alpha1.IBJob) (bool, error) { - batch, err := c.jobLister.IBJobs(job.Namespace).Get(job.Name) - - if err != nil { - return errors.IsNotFound(err), err - } - - if batch == nil { - return true, nil - } - - glog.Infof("%s/%s: Deleting 
Batch Job", job.Namespace, job.Name) - err = c.kubeclientset.BatchV1().Jobs(job.Namespace).Delete(job.Name, metav1.NewDeleteOptions(0)) - - if err != nil { - if !errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete job: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - } - - return true, nil -} - -func (c *Controller) deletePods(job *jobv1alpha1.IBJob) (bool, error) { - l := labels.Set{ - "job.infrabox.net/id": job.Name, - } - - pods, err := c.podLister.Pods(job.Namespace).List(l.AsSelector()) - - if err != nil { - return false, err - } - - if len(pods) == 0 { - return true, nil - } - - for _, pod := range pods { - glog.Infof("%s/%s: Deleting pod", job.Namespace, job.Name) - err = c.kubeclientset.CoreV1().Pods(job.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) - - if err != nil { - if !errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete pod: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - } - } - - return false, nil -} - -func (c *Controller) deleteJob(job *jobv1alpha1.IBJob) error { - servicesDeleted, err := c.deleteServices(job) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete service: %s", job.Namespace, job.Name, err.Error())) - return err - } - - batchDeleted, err := c.deleteBatchJob(job) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete batch job: %s", job.Namespace, job.Name, err.Error())) - return err - } - - podsDeleted, err := c.deletePods(job) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete pods: %s", job.Namespace, job.Name, err.Error())) - return err - } - - if !servicesDeleted { - glog.Infof("%s/%s: Not all services deleted yet", job.Namespace, job.Name) - return nil - } - - if !batchDeleted { - glog.Infof("%s/%s: Batch not deleted yet", job.Namespace, job.Name) - return nil - } - - if !podsDeleted { - glog.Infof("%s/%s: Not all pods deleted yet", job.Namespace, job.Name) - return nil - } - - // Everything deleted, remove finalizers and delete job - glog.Infof("%s/%s: removing finalizers", job.Namespace, job.Name) - job.SetFinalizers([]string{}) - _, err = c.jobclientset.CoreV1alpha1().IBJobs(job.Namespace).Update(job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to set finalizers", job.Namespace, job.Name)) - return err - } - - err = c.jobclientset.CoreV1alpha1().IBJobs(job.Namespace).Delete(job.Name, metav1.NewDeleteOptions(0)) - - if err != nil && !errors.IsNotFound(err) { - glog.Warningf("%s/%s: Failed to delete IBJob: %v", err) - } - - glog.Infof("%s/%s: Successfully deleted job", job.Namespace, job.Name) - return nil -} - -func (c *Controller) deleteService(job *jobv1alpha1.IBJob, service *jobv1alpha1.IBJobService) (bool, error) { - glog.Infof("%s/%s: Deleting Service: %s", job.Namespace, job.Name, service.Metadata.Name) - - gv, resource, err := c.getGroupVersion(service, job) - - if err != nil { - return false, err - } - - rc, err := NewRESTClientForConfig(c.config, gv) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create rest client: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - id, _ := service.Metadata.Labels["service.infrabox.net/id"] - result := rc.Delete().Namespace(job.Namespace).Name(resource.Name).SubResource(id).Do() - - if err := result.Error(); err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - 
glog.Infof("%s/%s: Successfully deleted service", job.Namespace, job.Name) - return true, nil -} - -func NewConfig(inConfig *rest.Config, gv *schema.GroupVersion) *rest.Config { - config := rest.CopyConfig(inConfig) - config.GroupVersion = gv - config.APIPath = "/apis" - config.AcceptContentTypes = "application/json" - config.ContentType = "application/json" - config.NegotiatedSerializer = scheme.Codecs - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return config -} - -func NewRESTClientForConfig(inConfig *rest.Config, gv *schema.GroupVersion) (*rest.RESTClient, error) { - config := NewConfig(inConfig, gv) - - restClient, err := rest.UnversionedRESTClientFor(config) - if err != nil { - return nil, err - } - - return restClient, err -} - -func (c *Controller) getGroupVersion(service *jobv1alpha1.IBJobService, job *jobv1alpha1.IBJob) (*schema.GroupVersion, *metav1.APIResource, error) { - client, err := discovery.NewDiscoveryClientForConfig(c.config) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create discovery client: %s", job.Namespace, job.Name, err.Error())) - return nil, nil, err - } - - resourceList, err := client.ServerResourcesForGroupVersion(service.APIVersion) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to get server resources for group version: %s", job.Namespace, job.Name, err.Error())) - return nil, nil, err - } - - var resource metav1.APIResource - for _, res := range resourceList.APIResources { - if res.Kind == service.Kind { - resource = res - break - } - } - - gv, err := schema.ParseGroupVersion(service.APIVersion) - - if err != nil { - return nil, nil, err - } - - return &gv, &resource, nil -} - -func (c *Controller) getServiceInterface(job *jobv1alpha1.IBJob, gv *schema.GroupVersion, resource *metav1.APIResource) (dynamic.ResourceInterface, error) { - config := NewConfig(c.config, gv) - dyn, err := dynamic.NewClient(config) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create new rest client", job.Namespace, job.Name)) - return nil, err - } - - r := dyn.Resource(resource, job.Namespace) - return r, err -} - -func (c *Controller) createService(service *jobv1alpha1.IBJobService, job *jobv1alpha1.IBJob) (bool, error) { - gv, resource, err := c.getGroupVersion(service, job) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to get GroupVersion for service", job.Namespace, job.Name)) - return false, err - } - - si, err := c.getServiceInterface(job, gv, resource) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to get service interface", job.Namespace, job.Name)) - return false, err - } - - id, ok := service.Metadata.Labels["service.infrabox.net/id"] - if !ok { - runtime.HandleError(fmt.Errorf("%s/%s: Infrabox service id not set", job.Namespace, job.Name)) - return false, goerr.New("Infrabox service id not set") - } - - s, err := si.Get(id, metav1.GetOptions{}) - - if err != nil { - if !errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("%s/%s: Could not get service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - } - - if len(s.Object) != 0 { - glog.Infof("%s/%s: Service %s/%s already exists, checking status", job.Namespace, job.Name, service.APIVersion, service.Kind) - - // Already exists, check status - var remote jobv1alpha1.IBService - serviceJson, err := s.MarshalJSON() - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to parse service: %s", job.Namespace, job.Name, err.Error())) 
- return false, err - } - - err = json.Unmarshal(serviceJson, &remote) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to parse service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - if remote.Status.Status == "ready" { - return true, nil - } - - if remote.Status.Status == "error" { - runtime.HandleError(fmt.Errorf("%s/%s: service is in state error: %s", job.Namespace, job.Name, remote.Status.Message)) - return false, goerr.New(remote.Status.Message) - } - } else { - glog.Infof("%s/%s: Service %s/%s does not yet exist, creating it", job.Namespace, job.Name, service.APIVersion, service.Kind) - - newService := map[string]interface{}{ - "apiVersion": service.APIVersion, - "kind": service.Kind, - "metadata": map[string]interface{}{ - "name": id, - "namespace": job.Namespace, - "labels": map[string]string{ - "service.infrabox.net/secret-name": id, - }, - }, - "spec": service.Spec, - } - - gv, err := schema.ParseGroupVersion(service.APIVersion) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to parse GroupVersion: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - rc, err := NewRESTClientForConfig(c.config, &gv) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create rest client: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - bytes, err := json.Marshal(newService) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to marshal service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - request := rc.Post().Namespace(job.Namespace).Name(resource.Name) - result := request.Body(bytes).Do() - - if err := result.Error(); err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to post service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - glog.Infof("%s/%s: Service %s/%s created", job.Namespace, job.Name, service.APIVersion, service.Kind) - } - - return false, nil -} - -func (c *Controller) deleteServices(job *jobv1alpha1.IBJob) (bool, error) { - if job.Spec.Services == nil { - return true, nil - } - - glog.Infof("%s/%s: Delete additional services", job.Namespace, job.Name) - - ready := true - for _, s := range job.Spec.Services { - r, err := c.deleteService(job, &s) - - if err != nil { - return false, nil - } - - if r { - glog.Infof("%s/%s: Service %s/%s deleted", job.Namespace, job.Name, s.APIVersion, s.Kind) - } else { - ready = false - glog.Infof("%s/%s: Service %s/%s not yet deleted", job.Namespace, job.Name, s.APIVersion, s.Kind) - } - } - - return ready, nil -} - -func (c *Controller) createServices(job *jobv1alpha1.IBJob) (bool, error) { - if job.Spec.Services == nil { - return true, nil - } - - glog.Infof("%s/%s: Create additional services", job.Namespace, job.Name) - - ready := true - for _, s := range job.Spec.Services { - r, err := c.createService(&s, job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create service: %s", job.Namespace, job.Name, err.Error())) - return false, err - } - - if r { - glog.Infof("%s/%s: Service %s/%s ready", job.Namespace, job.Name, s.APIVersion, s.Kind) - } else { - ready = false - glog.Infof("%s/%s: Service %s/%s not yet ready", job.Namespace, job.Name, s.APIVersion, s.Kind) - } - } - - return ready, nil -} - -func (c *Controller) createBatchJob(job *jobv1alpha1.IBJob) error { - glog.Infof("%s/%s: Creating Batch Job", job.Namespace, job.Name) - - keyPath := os.Getenv("INFRABOX_RSA_PRIVATE_KEY_PATH") - - if keyPath == "" { - keyPath = 
"/var/run/secrets/infrabox.net/rsa/id_rsa" - } - - var signKey *rsa.PrivateKey - - signBytes, err := ioutil.ReadFile(keyPath) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create token", job.Namespace, job.Name)) - return err - } - - signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create token", job.Namespace, job.Name)) - return err - } - - t := jwt.NewWithClaims(jwt.GetSigningMethod("RS256"), jwt.MapClaims{ - "job": map[string]string{ - "id": job.Name, - }, - "type": "job", - }) - - token, err := t.SignedString(signKey) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to creat token", job.Namespace, job.Name)) - return err - } - - _, err = c.kubeclientset.BatchV1().Jobs(job.Namespace).Create(c.newBatchJob(job, token)) - - if err != nil && !errors.IsAlreadyExists(err) { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create job: %s", job.Namespace, job.Name, err.Error())) - return err - } - - glog.Infof("%s/%s: Successfully created batch job", job.Namespace, job.Name) - return nil -} - -func (c *Controller) createJob(job *jobv1alpha1.IBJob) error { - // First set finalizers so we don't forget to delete it later on - job.SetFinalizers([]string{"job.infrabox.net"}) - job, err := c.jobclientset.CoreV1alpha1().IBJobs(job.Namespace).Update(job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to set finalizers", job.Namespace, job.Name)) - return err - } - - servicesCreated, err := c.createServices(job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create services: %s", job.Namespace, job.Name, err.Error())) - return err - } - - if !servicesCreated { - glog.Infof("%s/%s: Services not yet ready", job.Namespace, job.Name) - return nil - } - - err = c.createBatchJob(job) - - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create batch job: %s", job.Namespace, job.Name, err.Error())) - return err - } - - glog.Infof("%s/%s: Batch job created", job.Namespace, job.Name) - return nil -} - -func (c *Controller) syncHandlerImpl(job jobv1alpha1.IBJob) error { - // Check wether we should delete the job - delTimestamp := job.GetDeletionTimestamp() - if delTimestamp != nil { - return c.deleteJob(&job) - } - - if job.Status.Status == "error" { - glog.Infof("%s/%s: job in error state, ignoring", job.Namespace, job.Name) - return nil - } - - // Get the K8s Job - k8sjob, err := c.k8sJobLister.Jobs(job.Namespace).Get(job.Name) - - if err != nil { - if !errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to get job: %s", job.Namespace, job.Name, err.Error())) - return err - } - } - - if k8sjob == nil { - err = c.createJob(&job) - if err != nil { - runtime.HandleError(fmt.Errorf("%s/%s: Failed to create job: %s", job.Namespace, job.Name, err.Error())) - return err - } - } - - return nil -} - -func (c *Controller) enqueueJob(obj interface{}) { - var key string - var err error - if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - runtime.HandleError(err) - return - } - c.workqueue.AddRateLimited(key) -} diff --git a/src/controller/deploy/operator.yaml b/src/controller/deploy/operator.yaml new file mode 100644 index 000000000..ee1ba1b51 --- /dev/null +++ b/src/controller/deploy/operator.yaml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator +spec: + replicas: 1 + selector: + matchLabels: + name: operator + template: + metadata: + labels: + name: operator + spec: + 
containers: + - name: operator + image: 192.168.1.31:5000/infrabox/controller + command: + - operator + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace diff --git a/src/controller/glide.lock b/src/controller/glide.lock deleted file mode 100644 index c56478f7e..000000000 --- a/src/controller/glide.lock +++ /dev/null @@ -1,289 +0,0 @@ -hash: f9827ad7f51ce57cf228db37049053a9dd6a339c5b2c5485779ce39ec69c7faa -updated: 2018-05-10T10:20:45.623417909+02:00 -imports: -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/dgrijalva/jwt-go - version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e -- name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee -- name: github.com/gogo/protobuf - version: c0656edd0d9eab7c66d1eb0c568f9039345796f7 - subpackages: - - proto - - sortkeys -- name: github.com/golang/glog - version: 44145f04b68cf362d9c4df2182967c2275eaefed -- name: github.com/golang/groupcache - version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 - subpackages: - - lru -- name: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 - subpackages: - - proto - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/google/gofuzz - version: 44d81051d367757e1c7c6a5a86423ece9afcf63c -- name: github.com/googleapis/gnostic - version: 0c5108395e2debce0d731cf0287ddf7242066aba - subpackages: - - OpenAPIv2 - - compiler - - extensions -- name: github.com/hashicorp/golang-lru - version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 - subpackages: - - simplelru -- name: github.com/howeyc/gopass - version: bf9dde6d0d2c004a008c27aaee91170c786f6db8 -- name: github.com/imdario/mergo - version: 6633656539c1639d9d78127b7d47c622b5d7b6dc -- name: github.com/json-iterator/go - version: 13f86432b882000a51c6e610c620974462691a97 -- name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea -- name: golang.org/x/crypto - version: 81e90905daefcd6fd217b62423c0908922eadb30 - subpackages: - - ssh/terminal -- name: golang.org/x/net - version: 1c05540f6879653db88113bc4a2b70aec4bd491f - subpackages: - - context - - http2 - - http2/hpack - - idna - - lex/httplex -- name: golang.org/x/sys - version: 95c6576299259db960f6c5b9b69ea52422860fce - subpackages: - - unix - - windows -- name: golang.org/x/text - version: b19bf474d317b857955b12035d2c5acb57ce8b01 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: golang.org/x/time - version: f51c12702a4d776e4c1fa9b0fabab841babae631 - subpackages: - - rate -- name: gopkg.in/inf.v0 - version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -- name: gopkg.in/yaml.v2 - version: 670d4cfef0544295bc27a114dbac37980d83185a -- name: k8s.io/api - version: 73d903622b7391f3312dcbac6483fed484e185f8 - subpackages: - - admissionregistration/v1alpha1 - - admissionregistration/v1beta1 - - apps/v1 - - apps/v1beta1 - - apps/v1beta2 - - authentication/v1 - - authentication/v1beta1 - - authorization/v1 - - authorization/v1beta1 - - autoscaling/v1 - - autoscaling/v2beta1 - - batch/v1 - - batch/v1beta1 - - batch/v2alpha1 - - certificates/v1beta1 - - core/v1 - - events/v1beta1 - - extensions/v1beta1 - - networking/v1 - - policy/v1beta1 - - rbac/v1 - - rbac/v1alpha1 - - rbac/v1beta1 - - scheduling/v1alpha1 - - settings/v1alpha1 - - storage/v1 - - storage/v1alpha1 - - storage/v1beta1 -- name: k8s.io/apimachinery - version: 
302974c03f7e50f16561ba237db776ab93594ef6 - subpackages: - - pkg/api/errors - - pkg/api/meta - - pkg/api/resource - - pkg/apis/meta/internalversion - - pkg/apis/meta/v1 - - pkg/apis/meta/v1/unstructured - - pkg/apis/meta/v1beta1 - - pkg/conversion - - pkg/conversion/queryparams - - pkg/fields - - pkg/labels - - pkg/runtime - - pkg/runtime/schema - - pkg/runtime/serializer - - pkg/runtime/serializer/json - - pkg/runtime/serializer/protobuf - - pkg/runtime/serializer/recognizer - - pkg/runtime/serializer/streaming - - pkg/runtime/serializer/versioning - - pkg/selection - - pkg/types - - pkg/util/cache - - pkg/util/clock - - pkg/util/diff - - pkg/util/errors - - pkg/util/framer - - pkg/util/intstr - - pkg/util/json - - pkg/util/mergepatch - - pkg/util/net - - pkg/util/runtime - - pkg/util/sets - - pkg/util/strategicpatch - - pkg/util/validation - - pkg/util/validation/field - - pkg/util/wait - - pkg/util/yaml - - pkg/version - - pkg/watch - - third_party/forked/golang/json - - third_party/forked/golang/reflect -- name: k8s.io/client-go - version: 23781f4d6632d88e869066eaebb743857aa1ef9b - subpackages: - - discovery - - discovery/fake - - dynamic - - informers - - informers/admissionregistration - - informers/admissionregistration/v1alpha1 - - informers/admissionregistration/v1beta1 - - informers/apps - - informers/apps/v1 - - informers/apps/v1beta1 - - informers/apps/v1beta2 - - informers/autoscaling - - informers/autoscaling/v1 - - informers/autoscaling/v2beta1 - - informers/batch - - informers/batch/v1 - - informers/batch/v1beta1 - - informers/batch/v2alpha1 - - informers/certificates - - informers/certificates/v1beta1 - - informers/core - - informers/core/v1 - - informers/events - - informers/events/v1beta1 - - informers/extensions - - informers/extensions/v1beta1 - - informers/internalinterfaces - - informers/networking - - informers/networking/v1 - - informers/policy - - informers/policy/v1beta1 - - informers/rbac - - informers/rbac/v1 - - informers/rbac/v1alpha1 - - informers/rbac/v1beta1 - - informers/scheduling - - informers/scheduling/v1alpha1 - - informers/settings - - informers/settings/v1alpha1 - - informers/storage - - informers/storage/v1 - - informers/storage/v1alpha1 - - informers/storage/v1beta1 - - kubernetes - - kubernetes/scheme - - kubernetes/typed/admissionregistration/v1alpha1 - - kubernetes/typed/admissionregistration/v1beta1 - - kubernetes/typed/apps/v1 - - kubernetes/typed/apps/v1beta1 - - kubernetes/typed/apps/v1beta2 - - kubernetes/typed/authentication/v1 - - kubernetes/typed/authentication/v1beta1 - - kubernetes/typed/authorization/v1 - - kubernetes/typed/authorization/v1beta1 - - kubernetes/typed/autoscaling/v1 - - kubernetes/typed/autoscaling/v2beta1 - - kubernetes/typed/batch/v1 - - kubernetes/typed/batch/v1beta1 - - kubernetes/typed/batch/v2alpha1 - - kubernetes/typed/certificates/v1beta1 - - kubernetes/typed/core/v1 - - kubernetes/typed/events/v1beta1 - - kubernetes/typed/extensions/v1beta1 - - kubernetes/typed/networking/v1 - - kubernetes/typed/policy/v1beta1 - - kubernetes/typed/rbac/v1 - - kubernetes/typed/rbac/v1alpha1 - - kubernetes/typed/rbac/v1beta1 - - kubernetes/typed/scheduling/v1alpha1 - - kubernetes/typed/settings/v1alpha1 - - kubernetes/typed/storage/v1 - - kubernetes/typed/storage/v1alpha1 - - kubernetes/typed/storage/v1beta1 - - listers/admissionregistration/v1alpha1 - - listers/admissionregistration/v1beta1 - - listers/apps/v1 - - listers/apps/v1beta1 - - listers/apps/v1beta2 - - listers/autoscaling/v1 - - listers/autoscaling/v2beta1 - - 
listers/batch/v1 - - listers/batch/v1beta1 - - listers/batch/v2alpha1 - - listers/certificates/v1beta1 - - listers/core/v1 - - listers/events/v1beta1 - - listers/extensions/v1beta1 - - listers/networking/v1 - - listers/policy/v1beta1 - - listers/rbac/v1 - - listers/rbac/v1alpha1 - - listers/rbac/v1beta1 - - listers/scheduling/v1alpha1 - - listers/settings/v1alpha1 - - listers/storage/v1 - - listers/storage/v1alpha1 - - listers/storage/v1beta1 - - pkg/apis/clientauthentication - - pkg/apis/clientauthentication/v1alpha1 - - pkg/version - - plugin/pkg/client/auth/exec - - rest - - rest/watch - - testing - - tools/auth - - tools/cache - - tools/clientcmd - - tools/clientcmd/api - - tools/clientcmd/api/latest - - tools/clientcmd/api/v1 - - tools/metrics - - tools/pager - - tools/record - - tools/reference - - transport - - util/buffer - - util/cert - - util/flowcontrol - - util/homedir - - util/integer - - util/retry - - util/workqueue -- name: k8s.io/code-generator - version: cbd9dba38c3d8e0035d4bb554bd321e2c190e629 -- name: k8s.io/kube-openapi - version: 50ae88d24ede7b8bad68e23c805b5d3da5c8abaf - subpackages: - - pkg/util/proto -testImports: [] diff --git a/src/controller/glide.yaml b/src/controller/glide.yaml deleted file mode 100644 index 506f833fe..000000000 --- a/src/controller/glide.yaml +++ /dev/null @@ -1,43 +0,0 @@ -package: github.com/SAP/infrabox/src/controller -import: -- package: github.com/dgrijalva/jwt-go -- package: github.com/golang/glog -- package: k8s.io/api - version: kubernetes-1.10.2 - subpackages: - - batch/v1 - - core/v1 -- package: k8s.io/apimachinery - version: kubernetes-1.10.2 - subpackages: - - pkg/api/errors - - pkg/apis/meta/v1 - - pkg/apis/meta/v1/unstructured - - pkg/labels - - pkg/runtime - - pkg/runtime/schema - - pkg/runtime/serializer - - pkg/types - - pkg/util/runtime - - pkg/util/wait - - pkg/watch -- package: k8s.io/client-go - version: kubernetes-1.10.2 - subpackages: - - discovery - - discovery/fake - - dynamic - - informers - - kubernetes - - kubernetes/scheme - - kubernetes/typed/core/v1 - - listers/batch/v1 - - rest - - testing - - tools/cache - - tools/clientcmd - - tools/record - - util/flowcontrol - - util/workqueue -- package: k8s.io/code-generator - version: release-1.10 diff --git a/src/controller/hack/boilerplate.go.txt b/src/controller/hack/boilerplate.go.txt deleted file mode 100644 index 92f496cac..000000000 --- a/src/controller/hack/boilerplate.go.txt +++ /dev/null @@ -1,16 +0,0 @@ -/* -Copyright The InfraBox Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - diff --git a/src/controller/hack/update-codegen.sh b/src/controller/hack/update-codegen.sh deleted file mode 100755 index 27dd34f67..000000000 --- a/src/controller/hack/update-codegen.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} - -echo $CODEGEN_PKG - -# generate the code with: -# --output-base because this script should also be able to run inside the vendor dir of -# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir -# instead of the $GOPATH directly. For normal projects this can be dropped. -${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ - github.com/sap/infrabox/src/controller/pkg/client github.com/sap/infrabox/src/controller/pkg/apis \ - infrabox-controller:v1alpha1 \ - --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt diff --git a/src/controller/hack/verify-codegen.sh b/src/controller/hack/verify-codegen.sh deleted file mode 100755 index 9cc02a5a4..000000000 --- a/src/controller/hack/verify-codegen.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. - -DIFFROOT="${SCRIPT_ROOT}/pkg" -TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" -_tmp="${SCRIPT_ROOT}/_tmp" - -cleanup() { - rm -rf "${_tmp}" -} -trap "cleanup" EXIT SIGINT - -cleanup - -mkdir -p "${TMP_DIFFROOT}" -cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" - -"${SCRIPT_ROOT}/hack/update-codegen.sh" -echo "diffing ${DIFFROOT} against freshly generated codegen" -ret=0 -diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$? -cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}" -if [[ $ret -eq 0 ]] -then - echo "${DIFFROOT} up to date." -else - echo "${DIFFROOT} is out of date. 
Please run hack/update-codegen.sh" - exit 1 -fi diff --git a/src/controller/job.yaml b/src/controller/job.yaml deleted file mode 100644 index a4da14a35..000000000 --- a/src/controller/job.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: infrabox.net/v1alpha1 -kind: Job -metadata: - name: test-job -spec: - resources: - limits: - memory: 1024 - cpu: 1 diff --git a/src/controller/main.go b/src/controller/main.go deleted file mode 100644 index 702913ff2..000000000 --- a/src/controller/main.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "flag" - "time" - - "github.com/golang/glog" - clientset "github.com/sap/infrabox/src/controller/pkg/client/clientset/versioned" - informers "github.com/sap/infrabox/src/controller/pkg/client/informers/externalversions" - "github.com/sap/infrabox/src/controller/pkg/signals" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" -) - -var ( - masterURL string - kubeconfig string -) - -func main() { - flag.Parse() - - // set up signals so we handle the first shutdown signal gracefully - stopCh := signals.SetupSignalHandler() - - cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) - if err != nil { - glog.Fatalf("Error building kubeconfig: %s", err.Error()) - } - - kubeClient, err := kubernetes.NewForConfig(cfg) - if err != nil { - glog.Fatalf("Error building kubernetes clientset: %s", err.Error()) - } - - clusterClient, err := clientset.NewForConfig(cfg) - if err != nil { - glog.Fatalf("Error building cluster clientset: %s", err.Error()) - } - - kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) - clusterInformerFactory := informers.NewSharedInformerFactory(clusterClient, time.Second*30) - - controller := NewController(kubeClient, clusterClient, kubeInformerFactory, clusterInformerFactory, cfg) - - go kubeInformerFactory.Start(stopCh) - go clusterInformerFactory.Start(stopCh) - - if err = controller.Run(2, stopCh); err != nil { - glog.Fatalf("Error running controller: %s", err.Error()) - } -} - -func init() { - flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") - flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") -} diff --git a/src/controller/pkg/apis/infrabox-controller/v1alpha1/doc.go b/src/controller/pkg/apis/core/v1alpha1/doc.go similarity index 100% rename from src/controller/pkg/apis/infrabox-controller/v1alpha1/doc.go rename to src/controller/pkg/apis/core/v1alpha1/doc.go diff --git a/src/controller/pkg/apis/core/v1alpha1/register.go b/src/controller/pkg/apis/core/v1alpha1/register.go new file mode 100644 index 000000000..f61a3fd9a --- /dev/null +++ b/src/controller/pkg/apis/core/v1alpha1/register.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + sdkK8sutil "github.com/operator-framework/operator-sdk/pkg/util/k8sutil" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + version = "v1alpha1" + groupName = "core.infrabox.net" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme + // SchemeGroupVersion is the group version used to register these objects. 
+ SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: version} +) + +func init() { + sdkK8sutil.AddToSDKScheme(AddToScheme) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &IBPipeline{}, + &IBPipelineList{}, + &IBPipelineInvocation{}, + &IBPipelineInvocationList{}, + &IBFunctionInvocation{}, + &IBFunctionInvocationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/src/controller/pkg/apis/core/v1alpha1/types.go b/src/controller/pkg/apis/core/v1alpha1/types.go new file mode 100644 index 000000000..d79e7b6de --- /dev/null +++ b/src/controller/pkg/apis/core/v1alpha1/types.go @@ -0,0 +1,140 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + //"k8s.io/api/core/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBPipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []IBPipeline `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBPipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec IBPipelineSpec `json:"spec"` +} + +type IBPipelineSpec struct { + Steps []IBPipelineStep `json:"steps"` +} + +type IBPipelineStep struct { + Name string `json:"name"` + FunctionName string `json:"functionName"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type WorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []Workflow `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Workflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec WorkflowSpec `json:"spec"` + Status WorkflowStatus `json:"status"` +} + +type WorkflowStatus struct { + Status string `json:"status"` + Message string `json:"message"` +} + +type WorkflowSpec struct { + Pipelines []IBPipelineDefinitionSpec `json:"pipelines"` +} + +type IBPipelineDefinitionSpec struct { + Name string `json:"name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBPipelineInvocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []IBPipelineInvocation `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBPipelineInvocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec IBPipelineInvocationSpec `json:"spec"` + Status IBPipelineInvocationStatus `json:"status"` +} + +type IBPipelineInvocationStatus struct { + State string `json:"state"` + Message string `json:"message"` + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + StepStatuses []IBFunctionInvocationStatus `json:"stepStatuses,omitempty"` +} + +type IBPipelineInvocationSpec struct { + PipelineName string `json:"pipelineName"` + Steps map[string]IBPipelineInvocationStep `json:"steps"` + Services []IBPipelineService `json:"services,omitempty"` +} + +type IBPipelineService struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata IBPipelineServiceMetadata `json:"metadata"` +} + 
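+// IBPipelineServiceMetadata carries the subset of object metadata (name, labels, annotations) that pipeline services declare.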
+type IBPipelineServiceMetadata struct { + Name string `json:"name"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +type IBPipelineInvocationStep struct { + Name string + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBFunctionInvocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []IBFunctionInvocation `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IBFunctionInvocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec IBFunctionInvocationSpec `json:"spec"` + Status IBFunctionInvocationStatus `json:"status"` +} + +type IBFunctionInvocationStatus struct { + State corev1.ContainerState +} + +type IBFunctionInvocationSpec struct { + FunctionName string `json:"functionName"` + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` +} diff --git a/src/controller/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/src/controller/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..b5efaae3b --- /dev/null +++ b/src/controller/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,567 @@ +// +build !ignore_autogenerated + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBFunctionInvocation) DeepCopyInto(out *IBFunctionInvocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBFunctionInvocation. +func (in *IBFunctionInvocation) DeepCopy() *IBFunctionInvocation { + if in == nil { + return nil + } + out := new(IBFunctionInvocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBFunctionInvocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBFunctionInvocationList) DeepCopyInto(out *IBFunctionInvocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBFunctionInvocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBFunctionInvocationList. +func (in *IBFunctionInvocationList) DeepCopy() *IBFunctionInvocationList { + if in == nil { + return nil + } + out := new(IBFunctionInvocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBFunctionInvocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBFunctionInvocationSpec) DeepCopyInto(out *IBFunctionInvocationSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + if *in == nil { + *out = nil + } else { + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBFunctionInvocationSpec. +func (in *IBFunctionInvocationSpec) DeepCopy() *IBFunctionInvocationSpec { + if in == nil { + return nil + } + out := new(IBFunctionInvocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBFunctionInvocationStatus) DeepCopyInto(out *IBFunctionInvocationStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBFunctionInvocationStatus. +func (in *IBFunctionInvocationStatus) DeepCopy() *IBFunctionInvocationStatus { + if in == nil { + return nil + } + out := new(IBFunctionInvocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipeline) DeepCopyInto(out *IBPipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipeline. +func (in *IBPipeline) DeepCopy() *IBPipeline { + if in == nil { + return nil + } + out := new(IBPipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IBPipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineDefinitionSpec) DeepCopyInto(out *IBPipelineDefinitionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineDefinitionSpec. +func (in *IBPipelineDefinitionSpec) DeepCopy() *IBPipelineDefinitionSpec { + if in == nil { + return nil + } + out := new(IBPipelineDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineInvocation) DeepCopyInto(out *IBPipelineInvocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineInvocation. +func (in *IBPipelineInvocation) DeepCopy() *IBPipelineInvocation { + if in == nil { + return nil + } + out := new(IBPipelineInvocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPipelineInvocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineInvocationList) DeepCopyInto(out *IBPipelineInvocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPipelineInvocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineInvocationList. +func (in *IBPipelineInvocationList) DeepCopy() *IBPipelineInvocationList { + if in == nil { + return nil + } + out := new(IBPipelineInvocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPipelineInvocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineInvocationSpec) DeepCopyInto(out *IBPipelineInvocationSpec) { + *out = *in + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make(map[string]IBPipelineInvocationStep, len(*in)) + for key, val := range *in { + newVal := new(IBPipelineInvocationStep) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]IBPipelineService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineInvocationSpec. 
+func (in *IBPipelineInvocationSpec) DeepCopy() *IBPipelineInvocationSpec { + if in == nil { + return nil + } + out := new(IBPipelineInvocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineInvocationStatus) DeepCopyInto(out *IBPipelineInvocationStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.StepStatuses != nil { + in, out := &in.StepStatuses, &out.StepStatuses + *out = make([]IBFunctionInvocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineInvocationStatus. +func (in *IBPipelineInvocationStatus) DeepCopy() *IBPipelineInvocationStatus { + if in == nil { + return nil + } + out := new(IBPipelineInvocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineInvocationStep) DeepCopyInto(out *IBPipelineInvocationStep) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + if *in == nil { + *out = nil + } else { + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineInvocationStep. +func (in *IBPipelineInvocationStep) DeepCopy() *IBPipelineInvocationStep { + if in == nil { + return nil + } + out := new(IBPipelineInvocationStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineList) DeepCopyInto(out *IBPipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IBPipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineList. +func (in *IBPipelineList) DeepCopy() *IBPipelineList { + if in == nil { + return nil + } + out := new(IBPipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBPipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineService) DeepCopyInto(out *IBPipelineService) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineService. 
+func (in *IBPipelineService) DeepCopy() *IBPipelineService { + if in == nil { + return nil + } + out := new(IBPipelineService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineServiceMetadata) DeepCopyInto(out *IBPipelineServiceMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineServiceMetadata. +func (in *IBPipelineServiceMetadata) DeepCopy() *IBPipelineServiceMetadata { + if in == nil { + return nil + } + out := new(IBPipelineServiceMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineSpec) DeepCopyInto(out *IBPipelineSpec) { + *out = *in + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]IBPipelineStep, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineSpec. +func (in *IBPipelineSpec) DeepCopy() *IBPipelineSpec { + if in == nil { + return nil + } + out := new(IBPipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBPipelineStep) DeepCopyInto(out *IBPipelineStep) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBPipelineStep. +func (in *IBPipelineStep) DeepCopy() *IBPipelineStep { + if in == nil { + return nil + } + out := new(IBPipelineStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workflow) DeepCopyInto(out *Workflow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflow. +func (in *Workflow) DeepCopy() *Workflow { + if in == nil { + return nil + } + out := new(Workflow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workflow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workflow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowList. 
+func (in *WorkflowList) DeepCopy() *WorkflowList { + if in == nil { + return nil + } + out := new(WorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { + *out = *in + if in.Pipelines != nil { + in, out := &in.Pipelines, &out.Pipelines + *out = make([]IBPipelineDefinitionSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowSpec. +func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { + if in == nil { + return nil + } + out := new(WorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. +func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { + if in == nil { + return nil + } + out := new(WorkflowStatus) + in.DeepCopyInto(out) + return out +} diff --git a/src/controller/pkg/apis/infrabox-controller/register.go b/src/controller/pkg/apis/infrabox-controller/register.go deleted file mode 100644 index d10949311..000000000 --- a/src/controller/pkg/apis/infrabox-controller/register.go +++ /dev/null @@ -1,5 +0,0 @@ -package jobcontroller - -const ( - GroupName = "core.infrabox.net" -) diff --git a/src/controller/pkg/apis/infrabox-controller/v1alpha1/register.go b/src/controller/pkg/apis/infrabox-controller/v1alpha1/register.go deleted file mode 100644 index 440e7855b..000000000 --- a/src/controller/pkg/apis/infrabox-controller/v1alpha1/register.go +++ /dev/null @@ -1,37 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - controller "github.com/sap/infrabox/src/controller/pkg/apis/infrabox-controller" -) - -// SchemeGroupVersion is group version used to register these object/Groups -var SchemeGroupVersion = schema.GroupVersion{Group: controller.GroupName, Version: "v1alpha1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &IBJob{}, - &IBJobList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/src/controller/pkg/apis/infrabox-controller/v1alpha1/types.go b/src/controller/pkg/apis/infrabox-controller/v1alpha1/types.go deleted file mode 100644 index 1a54bed5b..000000000 --- a/src/controller/pkg/apis/infrabox-controller/v1alpha1/types.go +++ /dev/null @@ -1,59 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type IBJob struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec JobSpec `json:"spec"` - Status JobStatus `json:"status"` -} - -type JobSpec struct { - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` - Services []IBJobService `json:"services,omitempty"` -} - -type IBJobService struct { - APIVersion string `json:"apiVersion"` - Kind string `json:"kind"` - Metadata IBJobServiceMetadata `json:"metadata"` - Spec map[string]string `json:"spec,omitempty"` -} - -type IBJobServiceMetadata struct { - Name string `json:"name"` - Labels map[string]string `json:"labels,omitempty"` -} - -type IBService struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - Status ServiceStatus `json:"status,omitempty"` -} - -type ServiceStatus struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` -} - -type JobStatus struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type IBJobList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []IBJob `json:"items"` -} diff --git a/src/controller/pkg/apis/infrabox-controller/v1alpha1/zz_generated.deepcopy.go b/src/controller/pkg/apis/infrabox-controller/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 026cac8b2..000000000 --- a/src/controller/pkg/apis/infrabox-controller/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,216 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The InfraBox Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IBJob) DeepCopyInto(out *IBJob) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBJob. -func (in *IBJob) DeepCopy() *IBJob { - if in == nil { - return nil - } - out := new(IBJob) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IBJob) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBJobList) DeepCopyInto(out *IBJobList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IBJob, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBJobList. -func (in *IBJobList) DeepCopy() *IBJobList { - if in == nil { - return nil - } - out := new(IBJobList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IBJobList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBJobService) DeepCopyInto(out *IBJobService) { - *out = *in - in.Metadata.DeepCopyInto(&out.Metadata) - if in.Spec != nil { - in, out := &in.Spec, &out.Spec - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBJobService. -func (in *IBJobService) DeepCopy() *IBJobService { - if in == nil { - return nil - } - out := new(IBJobService) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBJobServiceMetadata) DeepCopyInto(out *IBJobServiceMetadata) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBJobServiceMetadata. -func (in *IBJobServiceMetadata) DeepCopy() *IBJobServiceMetadata { - if in == nil { - return nil - } - out := new(IBJobServiceMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBService) DeepCopyInto(out *IBService) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBService. -func (in *IBService) DeepCopy() *IBService { - if in == nil { - return nil - } - out := new(IBService) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *JobSpec) DeepCopyInto(out *JobSpec) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]IBJobService, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. -func (in *JobSpec) DeepCopy() *JobSpec { - if in == nil { - return nil - } - out := new(JobSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobStatus) DeepCopyInto(out *JobStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. -func (in *JobStatus) DeepCopy() *JobStatus { - if in == nil { - return nil - } - out := new(JobStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} diff --git a/src/controller/pkg/signals/signal.go b/src/controller/pkg/signals/signal.go deleted file mode 100644 index 6bddfddb4..000000000 --- a/src/controller/pkg/signals/signal.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package signals - -import ( - "os" - "os/signal" -) - -var onlyOneSignalHandler = make(chan struct{}) - -// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned -// which is closed on one of these signals. If a second signal is caught, the program -// is terminated with exit code 1. -func SetupSignalHandler() (stopCh <-chan struct{}) { - close(onlyOneSignalHandler) // panics when called twice - - stop := make(chan struct{}) - c := make(chan os.Signal, 2) - signal.Notify(c, shutdownSignals...) - go func() { - <-c - close(stop) - <-c - os.Exit(1) // second signal. Exit directly. - }() - - return stop -} diff --git a/src/controller/pkg/signals/signal_posix.go b/src/controller/pkg/signals/signal_posix.go deleted file mode 100644 index 9bdb4e741..000000000 --- a/src/controller/pkg/signals/signal_posix.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package signals - -import ( - "os" - "syscall" -) - -var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/src/controller/pkg/signals/signal_windows.go b/src/controller/pkg/signals/signal_windows.go deleted file mode 100644 index 4907d573f..000000000 --- a/src/controller/pkg/signals/signal_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package signals - -import ( - "os" -) - -var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/src/controller/pkg/stub/function.go b/src/controller/pkg/stub/function.go new file mode 100644 index 000000000..489c0a7fe --- /dev/null +++ b/src/controller/pkg/stub/function.go @@ -0,0 +1,317 @@ +package stub + +import ( + "encoding/json" + //goerr "errors" + "github.com/sap/infrabox/src/controller/pkg/apis/core/v1alpha1" + + "github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + "k8s.io/apimachinery/pkg/api/errors" + + //"k8s.io/apimachinery/pkg/api/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/operator-framework/operator-sdk/pkg/k8sclient" +) + +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec FunctionSpec `json:"spec"` +} + +type FunctionSpec struct { + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` +} + +type FunctionValidation struct { + OpenAPIV3Schema *apiextensions.JSONSchemaProps +} + +func getFunction(name string, log *logrus.Entry) (*Function, error) { + logrus.Infof("Get function: %s", name) + + resourceClient, _, err := 
k8sclient.GetResourceClient("core.infrabox.net/v1alpha1", "IBFunction", "")
+    if err != nil {
+        log.Errorf("failed to get resource client: %v", err)
+        return nil, err
+    }
+
+    f, err := resourceClient.Get(name, metav1.GetOptions{})
+    if err != nil {
+        log.Errorf("failed to get function: %v", err)
+        return nil, err
+    }
+
+    j, err := f.MarshalJSON()
+
+    if err != nil {
+        log.Errorf("failed to marshal json: %v", err)
+        return nil, err
+    }
+
+    var function Function
+    err = json.Unmarshal(j, &function)
+
+    if err != nil {
+        log.Errorf("failed to unmarshal json: %v", err)
+        return nil, err
+    }
+
+    return &function, nil
+}
+
+func validateFunctionInvocation(cr *v1alpha1.IBFunctionInvocation) error {
+    return nil
+}
+
+func (c *Controller) syncFunctionInvocation(cr *v1alpha1.IBFunctionInvocation, log *logrus.Entry) error {
+    logrus.Info("Sync function invocation")
+
+    finalizers := cr.GetFinalizers()
+
+    // validate the function invocation on its first occurrence
+    if len(finalizers) == 0 {
+        err := validateFunctionInvocation(cr)
+
+        if err != nil {
+            return err
+        }
+
+        // Set finalizers
+        cr.SetFinalizers([]string{"core.service.infrabox.net"})
+        cr.Status.State = corev1.ContainerState{
+            Waiting: &corev1.ContainerStateWaiting{
+                Message: "Container is being created",
+            },
+        }
+
+        err = sdk.Update(cr)
+        if err != nil {
+            logrus.Errorf("Failed to set finalizers: %v", err)
+            return err
+        }
+    }
+
+    batch := c.newBatch(cr)
+    err := sdk.Get(batch)
+
+    if err != nil && !errors.IsNotFound(err) {
+        log.Errorf("Failed to get batch jobs: %s", err.Error())
+        return err
+    }
+
+    // Create the job if it does not already exist
+    if err != nil && errors.IsNotFound(err) {
+        function, err := getFunction(cr.Spec.FunctionName, log)
+
+        if err != nil {
+            logrus.Errorf("Failed to get function: %v", err)
+            return err
+        }
+
+        // TODO(validate)
+        err = sdk.Create(c.newBatchJob(cr, function))
+
+        if err != nil && !errors.IsAlreadyExists(err) {
+            log.Errorf("Failed to create job: %s", err.Error())
+            return err
+        }
+
+        log.Info("Batch job created")
+
+        // Get job again so we can sync it
+        err = sdk.Get(batch)
+        if err != nil && !errors.IsNotFound(err) {
+            log.Errorf("Failed to get batch job: %s", err.Error())
+            return err
+        }
+    }
+
+    pods := &corev1.PodList{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
+    }
+
+    options := &metav1.ListOptions{
+        LabelSelector: "function.infrabox.net/function-invocation-name=" + cr.Name,
+    }
+    err = sdk.List(cr.Namespace, pods, sdk.WithListOptions(options))
+
+    if err != nil {
+        log.Errorf("Failed to list pods: %v", err)
+        return err
+    }
+
+    if len(pods.Items) != 0 {
+        pod := pods.Items[0]
+        if len(pod.Status.ContainerStatuses) != 0 {
+            cr.Status.State = pod.Status.ContainerStatuses[0].State
+            log.Info("Updating job status")
+            return sdk.Update(cr)
+        }
+    }
+
+    return nil
+}
+
+func (c *Controller) createBatchJob(fi *v1alpha1.IBFunctionInvocation, function *Function, log *logrus.Entry) error {
+    log.Infof("Creating Batch Job")
+
+    log.Infof("Successfully created batch job")
+    return nil
+}
+
+func (c *Controller) deletePods(fi *v1alpha1.IBFunctionInvocation, log *logrus.Entry) error {
+    pods := &corev1.PodList{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
+    }
+
+    options := &metav1.ListOptions{
+        LabelSelector: "function.infrabox.net/function-invocation-name=" + fi.Name,
+    }
+    err := sdk.List(fi.Namespace, pods, sdk.WithListOptions(options))
+
+    if err != nil {
+        log.Errorf("Failed to list pods: %v", err)
+        return err
+    }
+
+    for _, pod := range pods.Items {
log.Infof("Deleting pod") + + err := sdk.Delete(&pod, sdk.WithDeleteOptions(metav1.NewDeleteOptions(0))) + if err != nil && !errors.IsNotFound(err) { + log.Errorf("Failed to delete pod: %v", err) + return err + } + } + + return nil +} + +func (c *Controller) deleteFunctionInvocation(cr *v1alpha1.IBFunctionInvocation, log *logrus.Entry) error { + err := sdk.Delete(c.newBatch(cr), sdk.WithDeleteOptions(metav1.NewDeleteOptions(0))) + if err != nil && !errors.IsNotFound(err) { + log.Errorf("Failed to delete batch function invocation: %v", err) + return err + } + + err = c.deletePods(cr, log) + if err != nil { + log.Errorf("Failed to delete pods: %v", err) + return err + } + + cr.SetFinalizers([]string{}) + err = sdk.Update(cr) + if err != nil { + logrus.Errorf("Failed to remove finalizers: %v", err) + return err + } + + return nil +} + +func (c *Controller) newBatchJob(fi *v1alpha1.IBFunctionInvocation, function *Function) *batchv1.Job { + f := false + + job := corev1.Container{ + Name: "function", + ImagePullPolicy: "Always", + Image: function.Spec.Image, + Resources: function.Spec.Resources, + Env: function.Spec.Env, + SecurityContext: function.Spec.SecurityContext, + VolumeMounts: function.Spec.VolumeMounts, + } + + job.VolumeMounts = append(job.VolumeMounts, fi.Spec.VolumeMounts...) + job.Env = append(job.Env, fi.Spec.Env...) + + if fi.Spec.Resources != nil { + job.Resources = *fi.Spec.Resources + } + + containers := []corev1.Container{ + job, + } + + var zero int32 = 0 + var zero64 int64 = 0 + var one int32 = 1 + batch := &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + APIVersion: "batch/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fi.Name, + Namespace: fi.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(fi, schema.GroupVersionKind{ + Group: v1alpha1.SchemeGroupVersion.Group, + Version: v1alpha1.SchemeGroupVersion.Version, + Kind: "IBFunctionInvocation", + }), + }, + Labels: map[string]string{ + "function.infrabox.net/function-invocation-name": fi.Name, + }, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: &f, + Containers: containers, + RestartPolicy: "Never", + TerminationGracePeriodSeconds: &zero64, + Volumes: function.Spec.Volumes, + }, + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "function.infrabox.net/function-invocation-name": fi.Name, + }, + }, + }, + Completions: &one, + Parallelism: &one, + BackoffLimit: &zero, + }, + } + + batch.Spec.Template.Spec.Volumes = append(batch.Spec.Template.Spec.Volumes, fi.Spec.Volumes...) 
+ + return batch +} + +func (c *Controller) newBatch(fi *v1alpha1.IBFunctionInvocation) *batchv1.Job { + return &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + APIVersion: "batch/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fi.Name, + Namespace: fi.Namespace, + }, + } +} diff --git a/src/controller/pkg/stub/handler.go b/src/controller/pkg/stub/handler.go new file mode 100644 index 000000000..7ca6dba09 --- /dev/null +++ b/src/controller/pkg/stub/handler.go @@ -0,0 +1,132 @@ +package stub + +import ( + "context" + "github.com/sap/infrabox/src/controller/pkg/apis/core/v1alpha1" + + "github.com/onrik/logrus/filename" + "github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" +) + +func NewHandler() sdk.Handler { + return &Controller{} +} + +func init() { + logrus.AddHook(filename.NewHook()) + // logrus.SetLevel(logrus.WarnLevel) +} + +type Controller struct{} + +func handleError(pi *v1alpha1.IBPipelineInvocation, err error) error { + if errors.IsConflict(err) { + // we just wait for the next update + return nil + } + + pi.Status.State = "error" + pi.Status.Message = err.Error() + err = sdk.Update(pi) + + if err != nil && errors.IsConflict(err) { + return err + } + + return err +} + +func (h *Controller) Handle(ctx context.Context, event sdk.Event) error { + switch o := event.Object.(type) { + case *v1alpha1.IBPipelineInvocation: + pi := o + if event.Deleted { + return nil + } + + log := logrus.WithFields(logrus.Fields{ + "namespace": pi.Namespace, + "name": pi.Name, + }) + + delTimestamp := pi.GetDeletionTimestamp() + if delTimestamp != nil { + return h.deletePipelineInvocation(pi, log) + } + + if pi.Status.State == "error" || pi.Status.State == "terminated" { + log.Info("pi terminated, ignoring") + return nil + } + + if pi.Status.State == "" || pi.Status.State == "preparing" { + err := h.preparePipelineInvocation(pi, log) + if err != nil { + return handleError(pi, err) + } + } + + if pi.Status.State == "running" || pi.Status.State == "scheduling" { + err := h.runPipelineInvocation(pi, log) + if err != nil { + return handleError(pi, err) + } + } + + if pi.Status.State == "finalizing" { + err := h.finalizePipelineInvocation(pi, log) + if err != nil { + return handleError(pi, err) + } + } + case *v1alpha1.IBFunctionInvocation: + ns := o + if event.Deleted { + return nil + } + + log := logrus.WithFields(logrus.Fields{ + "namespace": ns.Namespace, + "name": ns.Name, + }) + + delTimestamp := ns.GetDeletionTimestamp() + if delTimestamp != nil { + return h.deleteFunctionInvocation(ns, log) + } else { + err := h.syncFunctionInvocation(ns, log) + + if ns.Status.State.Terminated != nil { + log.Info("function terminated, ignoring") + return nil + } + + if err == nil { + return nil + } + + if errors.IsConflict(err) { + // we just wait for the next update + return nil + } + + // Update status in case of error + ns.Status.State.Terminated = &corev1.ContainerStateTerminated{ + ExitCode: 1, + Message: err.Error(), + } + + err = sdk.Update(ns) + + if err != nil && errors.IsConflict(err) { + return err + } + } + } + + return nil +} diff --git a/src/controller/pkg/stub/pipeline.go b/src/controller/pkg/stub/pipeline.go new file mode 100644 index 000000000..b7ddc9dab --- /dev/null +++ b/src/controller/pkg/stub/pipeline.go @@ -0,0 +1,475 @@ +package stub + +import ( + goerr "errors" + "github.com/sap/infrabox/src/controller/pkg/apis/core/v1alpha1" + "strconv" + + 
"github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/operator-framework/operator-sdk/pkg/k8sclient" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func (c *Controller) deletePipelineInvocation(cr *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error { + err := c.deleteServices(cr, log) + if err != nil { + log.Errorf("Failed to delete services: %v", err) + return err + } + + cr.SetFinalizers([]string{}) + err = updateStatus(cr, log) + if err != nil { + logrus.Errorf("Failed to remove finalizers: %v", err) + return err + } + + return nil +} + +func (c *Controller) deleteService(pi *v1alpha1.IBPipelineInvocation, service *v1alpha1.IBPipelineService, log *logrus.Entry, index int) error { + log.Infof("Deleting Service") + id := pi.Name + "-" + strconv.Itoa(index) + resourceClient, _, err := k8sclient.GetResourceClient(service.APIVersion, service.Kind, pi.Namespace) + if err != nil { + log.Errorf("failed to get resource client: %v", err) + return err + } + + err = resourceClient.Delete(id, metav1.NewDeleteOptions(0)) + if err != nil && !errors.IsNotFound(err) { + log.Errorf("Failed to delete service: %s", err.Error()) + return err + } + + return nil +} + +func (c *Controller) deleteServices(pi *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error { + if pi.Spec.Services == nil { + return nil + } + + log.Info("Delete additional services") + for index, s := range pi.Spec.Services { + l := log.WithFields(logrus.Fields{ + "service_version": s.APIVersion, + "service_kind": s.Kind, + }) + err := c.deleteService(pi, &s, l, index) + if err != nil { + return err + } + + l.Info("Service deleted") + } + + return nil +} + +func (c *Controller) areServicesDeleted(pi *v1alpha1.IBPipelineInvocation, log *logrus.Entry) (bool, error) { + if pi.Spec.Services == nil { + return true, nil + } + + log.Info("Delete additional services") + for index, s := range pi.Spec.Services { + id := pi.Name + "-" + strconv.Itoa(index) + resourceClient, _, err := k8sclient.GetResourceClient(s.APIVersion, s.Kind, pi.Namespace) + if err != nil { + log.Errorf("failed to get resource client: %v", err) + return false, err + } + + service, err := resourceClient.Get(id, metav1.GetOptions{}) + log.Errorf("%v", err) + log.Errorf("%v", service) + + if err == nil { + // service still available + return false, err + } + + if err != nil { + if errors.IsNotFound(err) { + // already deleted + continue + } else { + return false, err + } + } + } + + return true, nil +} + +func updateStatus(pi *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error { + resourceClient, _, err := k8sclient.GetResourceClient(pi.APIVersion, pi.Kind, pi.Namespace) + if err != nil { + log.Errorf("failed to get resource client: %v", err) + return err + } + + j, err := resourceClient.Get(pi.Name, metav1.GetOptions{}) + if err != nil { + log.Errorf("failed to get pi: %v", err) + return err + } + + j.Object["status"] = pi.Status + j.SetFinalizers(pi.GetFinalizers()) + _, err = resourceClient.Update(j) + + if err != nil { + return err + } + + return sdk.Get(pi) +} + +func (c *Controller) preparePipelineInvocation(cr *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error { + logrus.Info("Prepare") + cr.SetFinalizers([]string{"core.infrabox.net"}) + cr.Status.State = "preparing" + cr.Status.Message = "Services are being created" + err := 
+
+    if err != nil {
+        log.Warnf("Failed to update status: %v", err)
+        return err
+    }
+
+    servicesCreated, err := c.createServices(cr, log)
+
+    if err != nil {
+        log.Errorf("Failed to create services: %s", err.Error())
+        return err
+    }
+
+    if servicesCreated {
+        log.Infof("Services are ready")
+        cr.Status.Message = ""
+        cr.Status.State = "scheduling"
+    } else {
+        log.Infof("Services not yet ready")
+    }
+
+    log.Info("Updating state")
+    return updateStatus(cr, log)
+}
+
+func (c *Controller) runPipelineInvocation(cr *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error {
+    logrus.Info("Run")
+    pipeline := newPipeline(cr)
+    err := sdk.Get(pipeline)
+
+    if err != nil {
+        logrus.Errorf("Pipeline not found: %s", cr.Spec.PipelineName)
+        return err
+    }
+
+    // Sync all functions
+    for index, pipelineStep := range pipeline.Spec.Steps {
+        if len(cr.Status.StepStatuses) <= index {
+            // No state yet for this step
+            cr.Status.StepStatuses = append(cr.Status.StepStatuses, v1alpha1.IBFunctionInvocationStatus{
+                State: corev1.ContainerState{
+                    Waiting: &corev1.ContainerStateWaiting{
+                        Message: "Containers are being created",
+                    },
+                },
+            })
+        }
+
+        status := &cr.Status.StepStatuses[index]
+
+        if status.State.Terminated != nil {
+            // step already finished
+            log.Info("Step already finished")
+            continue
+        }
+
+        stepInvocation := cr.Spec.Steps[pipelineStep.Name]
+
+        fi := newFunctionInvocation(cr, stepInvocation, &pipelineStep)
+        err = sdk.Create(fi)
+
+        if err != nil && !errors.IsAlreadyExists(err) {
+            log.Errorf("Failed to create function invocation: %s", err.Error())
+            return err
+        }
+
+        fi = newFunctionInvocation(cr, stepInvocation, &pipelineStep)
+        err = sdk.Get(fi)
+        if err != nil {
+            return err
+        }
+
+        cr.Status.StepStatuses[index] = fi.Status
+        if fi.Status.State.Terminated == nil {
+            // don't continue with next step until this one finished
+            break
+        }
+    }
+
+    firstState := cr.Status.StepStatuses[0].State
+
+    if firstState.Running != nil {
+        cr.Status.Message = ""
+        cr.Status.State = "running"
+        cr.Status.StartTime = &firstState.Running.StartedAt
+    } else if firstState.Terminated != nil {
+        cr.Status.Message = ""
+        cr.Status.State = "running"
+        cr.Status.StartTime = &firstState.Terminated.StartedAt
+    }
+
+    // Determine current status
+    allTerminated := true
+    for _, stepStatus := range cr.Status.StepStatuses {
+        if stepStatus.State.Terminated == nil {
+            allTerminated = false
+        }
+    }
+
+    if allTerminated {
+        cr.Status.Message = ""
+        cr.Status.State = "finalizing"
+        cr.Status.StartTime = &firstState.Terminated.StartedAt
+        cr.Status.CompletionTime = &cr.Status.StepStatuses[len(cr.Status.StepStatuses)-1].State.Terminated.FinishedAt
+    }
+
+    return updateStatus(cr, log)
+}
+
+func (c *Controller) finalizePipelineInvocation(cr *v1alpha1.IBPipelineInvocation, log *logrus.Entry) error {
+    log.Info("Finalizing")
+
+    err := c.deleteServices(cr, log)
+    if err != nil {
+        log.Errorf("Failed to delete services: %v", err)
+        return err
+    }
+
+    allServicesDeleted, err := c.areServicesDeleted(cr, log)
+    if err != nil {
+        return err
+    }
+
+    if !allServicesDeleted {
+        return nil
+    }
+
+    cr.Status.Message = ""
+    cr.Status.State = "terminated"
+
+    return updateStatus(cr, log)
+}
+
+func (c *Controller) createService(service *v1alpha1.IBPipelineService, pi *v1alpha1.IBPipelineInvocation, log *logrus.Entry, index int) (bool, error) {
+    resourceClient, _, err := k8sclient.GetResourceClient(pi.APIVersion, pi.Kind, pi.Namespace)
+    if err != nil {
+        log.Errorf("failed to get resource client: %v", err)
return false, err + } + + j, err := resourceClient.Get(pi.Name, metav1.GetOptions{}) + if err != nil { + log.Errorf("failed to get pi: %v", err) + return false, err + } + + services, ok := unstructured.NestedSlice(j.Object, "spec", "services") + + if !ok { + return false, goerr.New("services not found") + } + + var spec *map[string]interface{} = nil + for _, ser := range services { + m := ser.(map[string]interface{}) + un := unstructured.Unstructured{Object: m} + name := un.GetName() + + if name == service.Metadata.Name { + newSpec, ok := unstructured.NestedMap(m, "spec") + + if !ok { + newSpec = make(map[string]interface{}) + } + + spec = &newSpec + } + } + + if spec == nil { + return false, goerr.New("service not found") + } + + id := pi.Name + "-" + strconv.Itoa(index) + newService := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": service.APIVersion, + "kind": service.Kind, + "metadata": map[string]interface{}{ + "name": id, + "namespace": pi.Namespace, + "annotations": service.Metadata.Annotations, + "labels": map[string]string{ + "service.infrabox.net/secret-name": id, + }, + }, + "spec": *spec, + }, + } + + resourceClient, _, err = k8sclient.GetResourceClient(service.APIVersion, service.Kind, pi.Namespace) + if err != nil { + log.Errorf("failed to get resource client: %v", err) + return false, err + } + + _, err = resourceClient.Create(newService) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to post service: %s", err.Error()) + return false, err + } + + log.Infof("Service %s/%s created", service.APIVersion, service.Kind) + + s, err := resourceClient.Get(id, metav1.GetOptions{}) + if err != nil { + return false, err + } + + status, ok := unstructured.NestedString(s.Object, "status", "status") + + if !ok { + return false, nil + } + + if status == "ready" { + return true, nil + } + + if status == "error" { + msg, ok := unstructured.NestedString(s.Object, "status", "message") + + if !ok { + msg = "Internal Error" + } + + log.Errorf("service is in state error: %s", msg) + return false, goerr.New(msg) + } + + return false, nil +} + +func (c *Controller) createServices(pi *v1alpha1.IBPipelineInvocation, log *logrus.Entry) (bool, error) { + if pi.Spec.Services == nil { + log.Info("No services specified") + return true, nil + } + + log.Info("Creating additional services") + + ready := true + for index, s := range pi.Spec.Services { + l := log.WithFields(logrus.Fields{ + "service_version": s.APIVersion, + "service_kind": s.Kind, + }) + + r, err := c.createService(&s, pi, l, index) + + if err != nil { + l.Errorf("Failed to create service: %s", err.Error()) + return false, err + } + + if r { + l.Info("Service ready") + } else { + ready = false + l.Infof("Service not yet ready") + } + } + + return ready, nil +} + +func newPipeline(cr *v1alpha1.IBPipelineInvocation) *v1alpha1.IBPipeline { + return &v1alpha1.IBPipeline{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.Group + "/" + v1alpha1.SchemeGroupVersion.Version, + Kind: "IBPipeline", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Spec.PipelineName, + Namespace: cr.Namespace, + }, + } +} + +func newFunctionInvocation(pi *v1alpha1.IBPipelineInvocation, + invocationStep v1alpha1.IBPipelineInvocationStep, + step *v1alpha1.IBPipelineStep) *v1alpha1.IBFunctionInvocation { + + fi := &v1alpha1.IBFunctionInvocation{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.Group + "/" + v1alpha1.SchemeGroupVersion.Version, + Kind: "IBFunctionInvocation", + }, 
+ ObjectMeta: metav1.ObjectMeta{ + Name: pi.Name + "-" + step.Name, + Namespace: pi.Namespace, + OwnerReferences: newOwnerReferenceForPipelineInvocation(pi), + }, + Spec: v1alpha1.IBFunctionInvocationSpec{ + FunctionName: step.FunctionName, + Env: invocationStep.Env, + }, + } + + if invocationStep.Resources != nil { + fi.Spec.Resources = invocationStep.Resources + } + + for index, s := range pi.Spec.Services { + id := pi.Name + "-" + strconv.Itoa(index) + + fi.Spec.Volumes = append(fi.Spec.Volumes, corev1.Volume{ + Name: id, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: id, + }, + }, + }) + + fi.Spec.VolumeMounts = append(fi.Spec.VolumeMounts, corev1.VolumeMount{ + Name: id, + MountPath: "/var/run/infrabox.net/services/" + s.Metadata.Name, + }) + } + + return fi +} + +func newOwnerReferenceForPipelineInvocation(cr *v1alpha1.IBPipelineInvocation) []metav1.OwnerReference { + return []metav1.OwnerReference{ + *metav1.NewControllerRef(cr, schema.GroupVersionKind{ + Group: v1alpha1.SchemeGroupVersion.Group, + Version: v1alpha1.SchemeGroupVersion.Version, + Kind: "IBPipelineInvocation", + }), + } +} diff --git a/src/controller/run.sh b/src/controller/run.sh deleted file mode 100755 index bde1be29b..000000000 --- a/src/controller/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -e -go build - -export INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES=true -export INFRABOX_LOCAL_CACHE_ENABLED=false -export INFRABOX_JOB_MAX_OUTPUT_SIZE=9999999 -export INFRABOX_JOB_MOUNT_DOCKER_SOCKET=false -export INFRABOX_JOB_DAEMON_JSON='{}' -export INFRABOX_ROOT_URL="http://localhost:8080" -export INFRABOX_TAG=latest -export INFRABOX_DOCKER_REGISTRY="quay.io/infrabox" -export INFRABOX_LOCAL_CACHE_HOST_PATH="" -export INFRABOX_GERRIT_ENABLED=false - -./controller -kubeconfig ~/.kube/config -logtostderr diff --git a/src/controller/tmp/build/Dockerfile b/src/controller/tmp/build/Dockerfile new file mode 100644 index 000000000..4a18142de --- /dev/null +++ b/src/controller/tmp/build/Dockerfile @@ -0,0 +1,5 @@ +FROM alpine:3.6 + +ADD tmp/_output/bin/controller /usr/local/bin/controller + +CMD /usr/local/bin/controller diff --git a/src/controller/tmp/build/build.sh b/src/controller/tmp/build/build.sh new file mode 100755 index 000000000..89a0a6807 --- /dev/null +++ b/src/controller/tmp/build/build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +if ! which go > /dev/null; then + echo "golang needs to be installed" + exit 1 +fi + +BIN_DIR="$(pwd)/tmp/_output/bin" +mkdir -p ${BIN_DIR} +PROJECT_NAME="controller" +REPO_PATH="github.com/sap/infrabox/src/controller" +BUILD_PATH="${REPO_PATH}/cmd/${PROJECT_NAME}" +echo "building "${PROJECT_NAME}"..." +GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o ${BIN_DIR}/${PROJECT_NAME} $BUILD_PATH diff --git a/src/controller/tmp/build/docker_build.sh b/src/controller/tmp/build/docker_build.sh new file mode 100755 index 000000000..da98858d4 --- /dev/null +++ b/src/controller/tmp/build/docker_build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +if ! which docker > /dev/null; then + echo "docker needs to be installed" + exit 1 +fi + +: ${IMAGE:?"Need to set IMAGE, e.g. gcr.io//-operator"} + +echo "building container ${IMAGE}..." +docker build -t "${IMAGE}" -f tmp/build/Dockerfile . 
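The pipeline controller above ties each IBFunctionInvocation to its parent IBPipelineInvocation through metav1.NewControllerRef, which is what lets the Kubernetes garbage collector remove the step invocations once the parent is deleted. A minimal standalone sketch of that pattern follows; the toyOwner type and all names and values in it are illustrative, not part of this change:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    )

    // toyOwner stands in for an IBPipelineInvocation; only ObjectMeta is needed here.
    type toyOwner struct {
    	metav1.ObjectMeta
    }

    func main() {
    	parent := &toyOwner{
    		ObjectMeta: metav1.ObjectMeta{Name: "pi-1", Namespace: "default", UID: "1234"},
    	}

    	// NewControllerRef sets Controller=true on the reference, so deleting
    	// the parent cascades to every child object carrying this reference.
    	ref := *metav1.NewControllerRef(parent, schema.GroupVersionKind{
    		Group:   "core.infrabox.net",
    		Version: "v1alpha1",
    		Kind:    "IBPipelineInvocation",
    	})

    	fmt.Println(ref.APIVersion, ref.Kind, ref.Name, *ref.Controller)
    	// core.infrabox.net/v1alpha1 IBPipelineInvocation pi-1 true
    }
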
diff --git a/src/controller/tmp/codegen/boilerplate.go.txt b/src/controller/tmp/codegen/boilerplate.go.txt new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/src/controller/tmp/codegen/boilerplate.go.txt @@ -0,0 +1 @@ + diff --git a/src/controller/tmp/codegen/update-generated.sh b/src/controller/tmp/codegen/update-generated.sh new file mode 100755 index 000000000..799d3a0d2 --- /dev/null +++ b/src/controller/tmp/codegen/update-generated.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +DOCKER_REPO_ROOT="/go/src/github.com/sap/infrabox/src/controller" +IMAGE=${IMAGE:-"gcr.io/coreos-k8s-scale-testing/codegen:1.9.3"} + +docker run --rm \ + -v "$PWD":"$DOCKER_REPO_ROOT":Z \ + -w "$DOCKER_REPO_ROOT" \ + "$IMAGE" \ + "/go/src/k8s.io/code-generator/generate-groups.sh" \ + "deepcopy" \ + "github.com/sap/infrabox/src/controller/pkg/generated" \ + "github.com/sap/infrabox/src/controller/pkg/apis" \ + "core:v1alpha1" \ + --go-header-file "./tmp/codegen/boilerplate.go.txt" \ + $@ diff --git a/src/dashboard-client/src/components/job/Archive.vue b/src/dashboard-client/src/components/job/Archive.vue index 60a8cdc3b..84f764ced 100644 --- a/src/dashboard-client/src/components/job/Archive.vue +++ b/src/dashboard-client/src/components/job/Archive.vue @@ -12,7 +12,7 @@ {{ a.filename }} - {{ a.size / 1024 }} kb + {{ Math.round(a.size / 1024) }} kb diff --git a/src/dashboard-client/src/models/Job.js b/src/dashboard-client/src/models/Job.js index 831f7bf23..5bdc04229 100644 --- a/src/dashboard-client/src/models/Job.js +++ b/src/dashboard-client/src/models/Job.js @@ -107,6 +107,10 @@ export default class Job { } _addLines (lines) { + if (this.state === 'scheduled') { + this.state = 'running' + } + for (let line of lines) { if (line === '') { continue diff --git a/src/db/Dockerfile b/src/db/Dockerfile index 90fd77065..ab2b822f0 100644 --- a/src/db/Dockerfile +++ b/src/db/Dockerfile @@ -1,6 +1,7 @@ -FROM debian:9.3-slim +FROM debian:9.4-slim -RUN apt-get update -y && apt-get install -y python python-psycopg2 python-requests python-bcrypt python-crypto +RUN apt-get update -y && apt-get install -y python python-psycopg2 python-requests python-bcrypt python-crypto && \ + rm -rf /var/lib/apt/lists/* COPY src/db db COPY src/pyinfraboxutils /pyinfraboxutils diff --git a/src/db/migrations/00001.sql b/src/db/migrations/00001.sql index 15ed33819..cfce7607a 100644 --- a/src/db/migrations/00001.sql +++ b/src/db/migrations/00001.sql @@ -224,23 +224,6 @@ CREATE TABLE job_markup ( type markup_type NOT NULL ); - --- --- Name: job_stat; Type: TABLE; Schema: public; Owner: - --- - -CREATE TABLE job_stat ( - job_id uuid NOT NULL, - tests_added integer DEFAULT 0 NOT NULL, - tests_duration double precision NOT NULL, - tests_skipped integer DEFAULT 0 NOT NULL, - tests_failed integer DEFAULT 0 NOT NULL, - tests_error integer DEFAULT 0 NOT NULL, - tests_passed integer DEFAULT 0 NOT NULL, - project_id uuid NOT NULL -); - - -- -- Name: measurement; Type: TABLE; Schema: public; Owner: - -- @@ -430,15 +413,6 @@ ALTER TABLE ONLY job_markup ALTER TABLE ONLY job ADD CONSTRAINT job_pkey PRIMARY KEY (id); - --- --- Name: job_stat_pkey; Type: CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY job_stat - ADD CONSTRAINT job_stat_pkey PRIMARY KEY (job_id); - - -- -- Name: project_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- diff --git a/src/docker-registry/auth/server.py b/src/docker-registry/auth/server.py index 93001d274..36177bfaa 100644 --- 
a/src/docker-registry/auth/server.py +++ b/src/docker-registry/auth/server.py @@ -36,8 +36,8 @@ def v2(): state = r[0][0] - if state != 'running': - logger.warn('job not running anymore') + if state not in ('scheduled', 'running'): + logger.warn('job not running anymore: %s' % token['job']['id']) abort(401, 'Unauthorized') else: # pragma: no cover logger.warn('unsupported token type: %s' % token['type']) @@ -93,8 +93,8 @@ def v2_path(path): state = r[0] job_project_id = r[1] - if state != 'running': - logger.warn('job not running anymore') + if state not in ('scheduled', 'running'): + logger.warn('job not running anymore: %s' % token['job']['id']) abort(401, 'Unauthorized') if project_id != job_project_id: diff --git a/src/gerrit/api/Dockerfile b/src/gerrit/api/Dockerfile index 4d6ea3cfe..2e0cf4961 100644 --- a/src/gerrit/api/Dockerfile +++ b/src/gerrit/api/Dockerfile @@ -1,4 +1,4 @@ -from debian:8.9 +from debian:9.4-slim RUN apt-get update -y && \ apt-get install -y python-paramiko openssh-client python-requests python-bottle && \ diff --git a/src/gerrit/review/Dockerfile b/src/gerrit/review/Dockerfile index fe7fd219e..059d20e0c 100644 --- a/src/gerrit/review/Dockerfile +++ b/src/gerrit/review/Dockerfile @@ -1,4 +1,4 @@ -from debian:8.9 +from debian:9.4-slim RUN apt-get update -y && \ apt-get install -y python-psycopg2 python-paramiko openssh-client python-requests && \ diff --git a/src/gerrit/trigger/Dockerfile b/src/gerrit/trigger/Dockerfile index 02dc28950..de9bbeaa7 100644 --- a/src/gerrit/trigger/Dockerfile +++ b/src/gerrit/trigger/Dockerfile @@ -1,4 +1,4 @@ -from debian:8.9 +from debian:9.4-slim RUN apt-get update -y && \ apt-get install -y python-psycopg2 python-paramiko openssh-client python-requests && \ diff --git a/src/job/entrypoint.sh b/src/job/entrypoint.sh index 392cd2de8..b00eead56 100755 --- a/src/job/entrypoint.sh +++ b/src/job/entrypoint.sh @@ -1,11 +1,9 @@ #!/bin/bash -e mkdir -p /data/docker mkdir -p /data/infrabox +mkdir -p ~/.ssh if [ ! -e /var/run/docker.sock ]; then - mkdir -p /etc/docker - echo $INFRABOX_JOB_DAEMON_JSON > /etc/docker/daemon.json - echo "Waiting for docker daemon to start up" # Start docker daemon dockerd-entrypoint.sh --storage-driver overlay --data-root /data/docker & @@ -17,7 +15,7 @@ if [ ! 
-e /var/run/docker.sock ]; then sleep 1 if [ $COUNTER -gt 60 ]; then - echo "Docker daemon not started" + echo "Docker daemon not started" > '/dev/termination-log' exit 1 fi done @@ -25,4 +23,16 @@ else echo "Using host docker daemon socket" fi +if [ -f /tmp/gerrit/id_rsa ]; then + echo "Setting private key" + eval `ssh-agent -s` + cp /tmp/gerrit/id_rsa ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + echo "StrictHostKeyChecking no" > ~/.ssh/config + ssh-add ~/.ssh/id_rsa + ssh-keyscan -p $INFRABOX_GERRIT_PORT $INFRABOX_GERRIT_HOSTNAME >> ~/.ssh/known_hosts +else + echo "No private key configured" +fi + /job/job.py $@ diff --git a/src/job/git/Dockerfile b/src/job/git/Dockerfile deleted file mode 100644 index 50a7ecd4b..000000000 --- a/src/job/git/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM alpine:3.6 - -RUN apk add --no-cache python py2-flask git openssh-client py2-pip py2-gevent bash && \ - pip install flask_restplus && \ - apk del py2-pip - -ENV PYTHONPATH=/ - -COPY src/pyinfraboxutils /pyinfraboxutils -COPY src/job/git /git - -CMD /git/entrypoint.sh diff --git a/src/job/git/api.py b/src/job/git/api.py deleted file mode 100755 index cf2caa63a..000000000 --- a/src/job/git/api.py +++ /dev/null @@ -1,124 +0,0 @@ -#pylint: disable=wrong-import-position -import subprocess -import os -import traceback - -from gevent.wsgi import WSGIServer - -from flask import Flask, request -from flask_restplus import Api, Resource, fields - -from pyinfraboxutils import print_stackdriver, get_logger - -app = Flask(__name__) -api = Api(app) - -logger = get_logger('api') -ns = api.namespace('/', description='Clone repo') - -@ns.route('/ping') -class Ping(Resource): - def get(self): - return {'status': 200} - -clone_model = api.model('Clone', { - 'commit': fields.String(required=True, description='Commit'), - 'clone_url': fields.String(required=True, description='Clone URL'), - 'branch': fields.String(required=False, description='Branch'), - 'ref': fields.String(required=False, description='Ref'), - 'clone_all': fields.Boolean(required=False, description='Clone all'), - 'sub_path': fields.String(required=False, description='Sub path'), - 'submodules': fields.String(required=False, description='Init submodules') -}) - -@ns.route('/clone_repo') -class Clone(Resource): - def execute(self, args, cwd=None): - output = '\n' - output += ' '.join(args) - output += '\n' - output += subprocess.check_output(args, cwd=cwd, stderr=subprocess.STDOUT) - return output - - @api.expect(clone_model) - def post(self): - try: - output = "" - mount_repo_dir = os.environ.get('INFRABOX_JOB_REPO_MOUNT_PATH', '/repo') - - body = request.get_json() - commit = body['commit'] - clone_url = body['clone_url'] - branch = body.get('branch', None) - ref = body.get('ref', None) - clone_all = body.get('clone_all', False) - sub_path = body.get('sub_path', None) - submodules = body.get('submodules', True) - - if sub_path: - mount_repo_dir = os.path.join(mount_repo_dir, sub_path) - - if os.environ['INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES'] == 'true': - output += self.execute(('git', 'config', '--global', 'http.sslVerify', 'false')) - - cmd = ['git', 'clone'] - - if not clone_all: - cmd += ['--depth=10'] - - if branch: - cmd += ['--single-branch', '-b', branch] - - cmd += [clone_url, mount_repo_dir] - - exc = None - for _ in range(0, 2): - exc = None - try: - output += self.execute(cmd) - break - except Exception as e: - exc = e - - if exc: - raise exc - - if ref: - cmd = ['git', 'fetch', '--depth=10', clone_url, ref] - output += self.execute(cmd, 
cwd=mount_repo_dir) - - output += self.execute(['git', 'config', 'remote.origin.url', clone_url], cwd=mount_repo_dir) - output += self.execute(['git', 'config', 'remote.origin.fetch', '+refs/heads/*:refs/remotes/origin/*'], - cwd=mount_repo_dir) - - if not clone_all: - output += self.execute(['git', 'fetch', 'origin', commit], cwd=mount_repo_dir) - - cmd = ['git', 'checkout', '-qf', commit] - - #if not branch: - # cmd += ['-b', 'infrabox'] - - output += self.execute(cmd, cwd=mount_repo_dir) - - if submodules: - output += self.execute(['git', 'submodule', 'init'], cwd=mount_repo_dir) - output += self.execute(['git', 'submodule', 'update'], cwd=mount_repo_dir) - - return output - except subprocess.CalledProcessError as e: - return output + e.output + "\n" + str(e), 500 - except Exception as e: - return traceback.format_exc(), 500 - - -def main(): # pragma: no cover - logger.info('Starting Server') - http_server = WSGIServer(('0.0.0.0', 8080), app) - http_server.serve_forever() - -if __name__ == "__main__": # pragma: no cover - try: - main() - except: - print_stackdriver() diff --git a/src/job/git/entrypoint.sh b/src/job/git/entrypoint.sh deleted file mode 100755 index 668707f2a..000000000 --- a/src/job/git/entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -e -mkdir -p ~/.ssh - -if [ -f /tmp/gerrit/id_rsa ]; then - echo "Setting private key" - eval `ssh-agent -s` - cp /tmp/gerrit/id_rsa ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa - echo "StrictHostKeyChecking no" > ~/.ssh/config - ssh-add ~/.ssh/id_rsa - ssh-keyscan -p $INFRABOX_GERRIT_PORT $INFRABOX_GERRIT_HOSTNAME >> ~/.ssh/known_hosts -else - echo "No private key configured" -fi - -python /git/api.py diff --git a/src/job/infrabox_job/job.py b/src/job/infrabox_job/job.py index 3c24730e1..824e421d6 100644 --- a/src/job/infrabox_job/job.py +++ b/src/job/infrabox_job/job.py @@ -8,16 +8,12 @@ class Job(object): def __init__(self): - self.api_server = os.environ.get("INFRABOX_JOB_API_URL", None) + self.api_server = os.environ["INFRABOX_ROOT_URL"] + "/api/job" self.verify = True if os.environ.get('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES', 'false') == 'true': self.verify = False - if not self.api_server: - print "INFRABOX_JOB_API_URL not set" - sys.exit(1) - self.job = None self.project = None self.build = None @@ -122,17 +118,6 @@ def post_api_server(self, endpoint, data=None): time.sleep(1) - def set_running(self): - self.post_api_server('setrunning') - - def set_finished(self, state, message): - payload = { - 'state': state, - 'message': message - } - - self.post_api_server('setfinished', data=payload) - def post_stats(self, stat): payload = { "stats": stat @@ -206,9 +191,3 @@ def post_file_to_api_server(self, url, path, filename=None): return raise Failure('Failed to upload file: %s' % message) - - def update_status(self, status, message=None): - if status == "running": - return self.set_running() - - return self.set_finished(status, message) diff --git a/src/job/infrabox_job/process.py b/src/job/infrabox_job/process.py index a8281adb9..89087880f 100644 --- a/src/job/infrabox_job/process.py +++ b/src/job/infrabox_job/process.py @@ -15,7 +15,7 @@ def __init__(self): self.output = [] self.last_send = datetime.now() self.is_finish = False - self.enable_logging = False + self.enable_logging = True self.verify = True if os.environ.get('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES', 'false') == 'true': @@ -98,7 +98,7 @@ def flush(self): "output": buf } - api_server = os.environ["INFRABOX_JOB_API_URL"] + api_server = os.environ["INFRABOX_ROOT_URL"] 
+ "/api/job" headers = { 'Authorization': 'token ' + os.environ['INFRABOX_JOB_TOKEN'] diff --git a/src/job/job.py b/src/job/job.py index 21acc6203..190264a03 100755 --- a/src/job/job.py +++ b/src/job/job.py @@ -1,6 +1,7 @@ #!/usr/bin/python #pylint: disable=too-many-lines,attribute-defined-outside-init,too-many-public-methods,too-many-locals import os +import sys import shutil import time import json @@ -150,38 +151,57 @@ def get_files_in_dir(self, d, ending=None): return result def clone_repo(self, commit, clone_url, branch, ref, clone_all, sub_path=None, submodules=True): - git_server = os.environ["INFRABOX_JOB_GIT_URL"] + c = self.console + mount_repo_dir = self.mount_repo_dir - while True: - try: - r = requests.get('%s/ping' % git_server, timeout=5) + if sub_path: + mount_repo_dir = os.path.join(mount_repo_dir, sub_path) + + if os.environ['INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES'] == 'true': + c.execute(('git', 'config', '--global', 'http.sslVerify', 'false'), show=True) + + cmd = ['git', 'clone'] + + if not clone_all: + cmd += ['--depth=10'] - if r.status_code == 200: - break - else: - self.console.collect(r.text, show=True) + if branch: + cmd += ['--single-branch', '-b', branch] + + cmd += [clone_url, mount_repo_dir] + + exc = None + for _ in range(0, 3): + exc = None + try: + c.execute(cmd, show=True) + break except Exception as e: - print e + exc = e + time.sleep(5) + + if exc: + raise exc + + if ref: + cmd = ['git', 'fetch', '--depth=10', clone_url, ref] + c.execute(cmd, cwd=mount_repo_dir, show=True) - time.sleep(1) + c.execute(['git', 'config', 'remote.origin.url', clone_url], cwd=mount_repo_dir, show=True) + c.execute(['git', 'config', 'remote.origin.fetch', '+refs/heads/*:refs/remotes/origin/*'], + cwd=mount_repo_dir, show=True) - d = { - 'commit': commit, - 'clone_url': clone_url, - 'branch': branch, - 'ref': ref, - 'clone_all': clone_all, - 'sub_path': sub_path, - 'submodules': submodules - } + if not clone_all: + c.execute(['git', 'fetch', 'origin', commit], cwd=mount_repo_dir, show=True) - r = requests.post('%s/clone_repo' % git_server, json=d, timeout=1800) + cmd = ['git', 'checkout', '-qf', commit] - for l in r.text.split('\\n'): - self.console.collect(l, show=True) + c.execute(cmd, cwd=mount_repo_dir, show=True) + + if submodules: + c.execute(['git', 'submodule', 'init'], cwd=mount_repo_dir, show=True) + c.execute(['git', 'submodule', 'update'], cwd=mount_repo_dir, show=True) - if r.status_code != 200: - raise Failure('Failed to clone repository') def get_source(self): c = self.console @@ -268,7 +288,6 @@ def check_container_crashed(self): f.write("started") def main(self): - self.update_status('running') self.load_data() # Show environment @@ -772,10 +791,6 @@ def run_docker_container(self, image_name): # Mount context cmd += ['-v', '%s:/infrabox/context' % self._get_build_context_current_job()] - # Mount docker socket - if os.environ['INFRABOX_JOB_MOUNT_DOCKER_SOCKET'] == 'true': - cmd += ['-v', '/var/run/docker.sock:/var/run/docker.sock'] - # Add local cache if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true': cmd += ['-v', "/local-cache:/infrabox/local-cache"] @@ -1195,23 +1210,27 @@ def main(): get_env('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES') get_env('INFRABOX_LOCAL_CACHE_ENABLED') get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE') - get_env('INFRABOX_JOB_API_URL') - get_env('INFRABOX_JOB_GIT_URL') - get_env('INFRABOX_JOB_MOUNT_DOCKER_SOCKET') console = ApiConsole() j = None try: j = RunJob(console) j.main() - j.console.flush() j.console.header('Finished', show=True) 
@@ -1195,23 +1210,27 @@ def main():
     get_env('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES')
     get_env('INFRABOX_LOCAL_CACHE_ENABLED')
     get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE')
-    get_env('INFRABOX_JOB_API_URL')
-    get_env('INFRABOX_JOB_GIT_URL')
-    get_env('INFRABOX_JOB_MOUNT_DOCKER_SOCKET')
 
     console = ApiConsole()
 
     j = None
     try:
         j = RunJob(console)
         j.main()
-        j.console.flush()
         j.console.header('Finished', show=True)
-        j.update_status('finished', message='Successfully finished')
+        j.console.flush()
+
+        with open('/dev/termination-log', 'w+') as out:
+            out.write('Job finished successfully')
+
     except Failure as e:
         j.console.header('Failure', show=True)
         j.console.collect(e.message, show=True)
         j.console.flush()
-        j.update_status('failure', message=e.message)
+
+        with open('/dev/termination-log', 'w+') as out:
+            out.write(e.message)
+
+        sys.exit(1)
     except:
         print_stackdriver()
         if j:
@@ -1219,10 +1238,15 @@ def main():
             msg = traceback.format_exc()
             j.console.collect(msg, show=True)
             j.console.flush()
-            j.update_status('error', message='An error occured')
+
+            with open('/dev/termination-log', 'w+') as out:
+                out.write(msg)
+
+            sys.exit(1)
 
 if __name__ == "__main__":
     try:
         main()
     except:
         print_stackdriver()
+        sys.exit(1)
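Instead of posting setrunning/setfinished to an API, the job now reports its outcome the Kubernetes-native way: the message goes to /dev/termination-log, which the kubelet exposes as the container's termination message, and the exit code signals success or failure. Condensed, the pattern is:

    import sys

    def report_result(message, success=True):
        # The kubelet surfaces this file's content as the pod's
        # termination message.
        with open('/dev/termination-log', 'w+') as out:
            out.write(message)

        if not success:
            sys.exit(1)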
diff --git a/src/pyinfrabox/infrabox/__init__.py b/src/pyinfrabox/infrabox/__init__.py
index 4a42b827a..f9fdc115e 100644
--- a/src/pyinfrabox/infrabox/__init__.py
+++ b/src/pyinfrabox/infrabox/__init__.py
@@ -160,19 +160,6 @@ def parse_limits(d, path):
     if d['memory'] <= 255:
         raise ValidationError(path + ".memory", "must be greater than 255")
 
-def parse_kubernetes_limits(d, path):
-    check_allowed_properties(d, path, ("memory", "cpu"))
-    check_required_properties(d, path, ("memory", "cpu"))
-
-    check_number(d['cpu'], path + ".cpu")
-    check_number(d['memory'], path + ".memory")
-
-    if d['cpu'] <= 0:
-        raise ValidationError(path + ".cpu", "must be greater than 0")
-
-    if d['memory'] <= 255:
-        raise ValidationError(path + ".memory", "must be greater than 255")
-
 def parse_add_capabilities(d, path):
     check_string_array(d, path)
 
@@ -191,13 +178,6 @@ def parse_security_context(d, path):
     if 'privileged' in d:
         check_boolean(d['privileged'], path + ".privileged")
 
-def parse_service_spec(d, path):
-    if not isinstance(d, dict):
-        raise ValidationError(path, "must be an object")
-
-    for key, value in d.items():
-        check_text(value, path + "." + key)
-
 def parse_services(d, path):
     if not isinstance(d, list):
         raise ValidationError(path, "must be an array")
@@ -219,11 +199,8 @@ def parse_services(d, path):
 
         names.append(name)
 
-        if 'spec' in elem:
-            parse_service_spec(elem['spec'], p + ".spec")
-
 def parse_resources(d, path):
-    check_allowed_properties(d, path, ("limits", "kubernetes"))
+    check_allowed_properties(d, path, ("limits",))
     check_required_properties(d, path, ("limits",))
     parse_limits(d['limits'], path + ".limits")
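After this change a job's resources section may only contain limits; the kubernetes sub-resource and the per-service spec are rejected. Assuming the validator is importable as below (import path is an assumption), a definition like this still validates — parse_limits requires cpu > 0 and memory > 255, and the path argument only shapes error messages:

    from pyinfrabox.infrabox import parse_resources  # import path assumed

    # Passes: only 'limits' is allowed, cpu > 0 and memory > 255.
    parse_resources({'limits': {'cpu': 1, 'memory': 1024}}, '#.resources')

    # Raises ValidationError: 'kubernetes' is no longer an allowed property.
    parse_resources({'limits': {'cpu': 1, 'memory': 1024},
                     'kubernetes': {'limits': {'cpu': 1, 'memory': 1024}}},
                    '#.resources')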
diff --git a/src/pyinfraboxutils/ibflask.py b/src/pyinfraboxutils/ibflask.py
index 1da856a8a..33764d35a 100644
--- a/src/pyinfraboxutils/ibflask.py
+++ b/src/pyinfraboxutils/ibflask.py
@@ -62,7 +62,7 @@ def get_token():
             return token
 
     else:
-        logger.warn('No auth header')
+        logger.info('No auth header')
         abort(401, 'Unauthorized')
 
     try:
diff --git a/src/scheduler/kubernetes/Dockerfile b/src/scheduler/kubernetes/Dockerfile
index 563f075bd..8ab5c2508 100644
--- a/src/scheduler/kubernetes/Dockerfile
+++ b/src/scheduler/kubernetes/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.6
+FROM alpine:3.7
 
 RUN apk add --no-cache python3 py3-psycopg2 py3-requests py3-pip py3-cryptography ca-certificates && \
     pip3 install PyJWT && \
diff --git a/src/scheduler/kubernetes/scheduler.py b/src/scheduler/kubernetes/scheduler.py
index 7e792ffcd..770fa4920 100644
--- a/src/scheduler/kubernetes/scheduler.py
+++ b/src/scheduler/kubernetes/scheduler.py
@@ -1,19 +1,16 @@
-import logging
 import argparse
 import time
 import os
+from datetime import datetime
+
 import requests
+
 import psycopg2
 import psycopg2.extensions
 
 from pyinfraboxutils import get_logger, get_env, print_stackdriver
 from pyinfraboxutils.db import connect_db
-
-def gerrit_enabled():
-    return os.environ['INFRABOX_GERRIT_ENABLED'] == 'true'
-
-def use_host_docker_daemon():
-    return os.environ['INFRABOX_JOB_USE_HOST_DOCKER_DAEMON'] == 'true'
+from pyinfraboxutils.token import encode_job_token
 
 class Scheduler(object):
     def __init__(self, conn, args):
@@ -22,43 +19,66 @@ def __init__(self, conn, args):
         self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
         self.logger = get_logger("scheduler")
 
-    def kube_delete_namespace(self, job_id):
-        h = {'Authorization': 'Bearer %s' % self.args.token}
-        namespace_name = "ib-%s" % job_id
-
-        # delete the namespace
-        p = {"gracePeriodSeconds": 0}
-        requests.delete(self.args.api_server + '/api/v1/namespaces/%s' % (namespace_name,),
-                        headers=h, params=p, timeout=10)
-
     def kube_delete_job(self, job_id):
         h = {'Authorization': 'Bearer %s' % self.args.token}
         requests.delete(self.args.api_server +
-                        '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibjobs/%s' % (self.namespace, job_id,),
+                        '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibpipelineinvocations/%s' % (self.namespace, job_id,),
                         headers=h, timeout=5)
 
-    def kube_job(self, job_id, cpu, mem, additional_env=None, services=None):
+    def kube_job(self, job_id, cpu, mem, services=None):
         h = {'Authorization': 'Bearer %s' % self.args.token}
+
+        job_token = encode_job_token(job_id).decode()
+
+        env = [{
+            'name': 'INFRABOX_JOB_ID',
+            'value': job_id
+        }, {
+            'name': 'INFRABOX_JOB_TOKEN',
+            'value': job_token
+        }, {
+            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_MEMORY',
+            'value': str(mem)
+        }, {
+            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_CPU',
+            'value': str(cpu)
+        }]
+
+        root_url = os.environ['INFRABOX_ROOT_URL']
+
+        if services:
+            for s in services:
+                if 'annotations' not in s['metadata']:
+                    s['metadata']['annotations'] = {}
+
+                s['metadata']['annotations']['infrabox.net/job-id'] = job_id
+                s['metadata']['annotations']['infrabox.net/job-token'] = job_token
+                s['metadata']['annotations']['infrabox.net/root-url'] = root_url
+
         job = {
             'apiVersion': 'core.infrabox.net/v1alpha1',
-            'kind': 'IBJob',
+            'kind': 'IBPipelineInvocation',
             'metadata': {
                 'name': job_id
             },
             'spec': {
-                'resources': {
-                    'limits': {
-                        'memory': '%sMi' % mem,
-                        'cpu': cpu
-                    }
-                },
-                'env': additional_env,
+                'pipelineName': 'infrabox-default-pipeline',
                 'services': services,
+                'steps': {
+                    'run': {
+                        'resources': {
+                            'limits': {
+                                'memory': '%sMi' % mem,
+                                'cpu': cpu
+                            }
+                        },
+                        'env': env,
+                    }
+                }
             }
         }
 
-        r = requests.post(self.args.api_server + '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibjobs' % self.namespace,
+        r = requests.post(self.args.api_server + '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibpipelineinvocations' % self.namespace,
                           headers=h, json=job, timeout=10)
 
         if r.status_code != 201:
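Everything the job container needs now travels as environment variables on the run step of the IBPipelineInvocation, including a freshly minted per-job token. Inside the container these are read back as plain env vars, roughly:

    import os

    # Set by the scheduler on the 'run' step (see kube_job above).
    job_id = os.environ['INFRABOX_JOB_ID']
    job_token = os.environ['INFRABOX_JOB_TOKEN']
    mem_limit_mb = int(os.environ['INFRABOX_JOB_RESOURCES_LIMITS_MEMORY'])
    cpu_limit = float(os.environ['INFRABOX_JOB_RESOURCES_LIMITS_CPU'])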
"apiVersion": "rbac.authorization.k8s.io/v1beta1", - "metadata": { - "name": namespace_name + '-discovery' - }, - "subjects": [{ - "kind": "ServiceAccount", - "name": "default", - "namespace": namespace_name - }], - "roleRef": { - "kind": "ClusterRole", - "name": "system:discover", - "apiGroup": "rbac.authorization.k8s.io" - } - } - - r = requests.post(self.args.api_server + - '/apis/rbac.authorization.k8s.io/v1beta1/namespaces/%s/rolebindings' % namespace_name, - headers=h, json=rb, timeout=10) - - if r.status_code != 201: - self.logger.warn("Failed to create RoleBinding for discovery: %s", r.text) - return False - - # find secret - r = requests.get(self.args.api_server + '/api/v1/namespaces/%s/secrets' % namespace_name, - headers=h, timeout=5) - - if r.status_code != 200: - self.logger.warn("Failed to get service account secret: %s", r.text) - return False - - data = r.json() - secret = data['items'][0] - - env = [ - {"name": "INFRABOX_RESOURCES_KUBERNETES_CA_CRT", "value": secret['data']['ca.crt']}, - {"name": "INFRABOX_RESOURCES_KUBERNETES_TOKEN", "value": secret['data']['token']}, - {"name": "INFRABOX_RESOURCES_KUBERNETES_NAMESPACE", "value": secret['data']['namespace']}, - {"name": "INFRABOX_RESOURCES_KUBERNETES_MASTER_URL", "value": self.args.api_server} - ] - - return env - def schedule_job(self, job_id, cpu, memory): cursor = self.conn.cursor() cursor.execute(''' @@ -224,26 +96,9 @@ def schedule_job(self, job_id, cpu, memory): j = cursor.fetchone() cursor.close() - resources = j[2] definition = j[3] cpu -= 0.2 - - additional_env = None - if resources and resources.get('kubernetes', None): - k8s = resources.get('kubernetes', None) - additional_env = self.create_kube_namespace(job_id, k8s) - - if not additional_env: - self.logger.warn('Failed to create kubernetes namespace') - cursor = self.conn.cursor() - cursor.execute(''' - UPDATE job - SET state = 'error', console = 'Failed to create kubernetes namespace' - WHERE id = %s''', [job_id]) - cursor.close() - return - self.logger.info("Scheduling job to kubernetes") services = None @@ -251,7 +106,7 @@ def schedule_job(self, job_id, cpu, memory): if definition and 'services' in definition: services = definition['services'] - if not self.kube_job(job_id, cpu, memory, additional_env=additional_env, services=services): + if not self.kube_job(job_id, cpu, memory, services=services): return cursor = self.conn.cursor() @@ -428,49 +283,11 @@ def handle_timeouts(self): cursor.close() - def handle_orphaned_namespaces(self): - h = {'Authorization': 'Bearer %s' % self.args.token} - r = requests.get(self.args.api_server + '/api/v1/namespaces', headers=h, timeout=10) - data = r.json() - - if 'items' not in data: - self.logger.warn('No data returned') - return - - for j in data['items']: - metadata = j.get('metadata', None) - if not metadata: - continue - - labels = metadata.get('labels', None) - if not labels: - continue - - for key in labels: - if key != 'infrabox-job-id': - continue - - job_id = labels[key] - - cursor = self.conn.cursor() - cursor.execute('''SELECT state FROM job where id = %s''', (job_id,)) - result = cursor.fetchall() - cursor.close() - - if len(result) != 1: - continue - - state = result[0][0] - - if state in ('queued', 'scheduled', 'running'): - continue - - self.logger.info('Deleting orphaned namespace ib-%s', job_id) - self.kube_delete_namespace(job_id) - def handle_orphaned_jobs(self): + self.logger.debug("Handling orphaned jobs") + h = {'Authorization': 'Bearer %s' % self.args.token} - r = 
@@ -428,49 +283,11 @@ def handle_timeouts(self):
         cursor.close()
 
-    def handle_orphaned_namespaces(self):
-        h = {'Authorization': 'Bearer %s' % self.args.token}
-        r = requests.get(self.args.api_server + '/api/v1/namespaces', headers=h, timeout=10)
-        data = r.json()
-
-        if 'items' not in data:
-            self.logger.warn('No data returned')
-            return
-
-        for j in data['items']:
-            metadata = j.get('metadata', None)
-            if not metadata:
-                continue
-
-            labels = metadata.get('labels', None)
-            if not labels:
-                continue
-
-            for key in labels:
-                if key != 'infrabox-job-id':
-                    continue
-
-                job_id = labels[key]
-
-                cursor = self.conn.cursor()
-                cursor.execute('''SELECT state FROM job where id = %s''', (job_id,))
-                result = cursor.fetchall()
-                cursor.close()
-
-                if len(result) != 1:
-                    continue
-
-                state = result[0][0]
-
-                if state in ('queued', 'scheduled', 'running'):
-                    continue
-
-                self.logger.info('Deleting orphaned namespace ib-%s', job_id)
-                self.kube_delete_namespace(job_id)
-
     def handle_orphaned_jobs(self):
+        self.logger.debug("Handling orphaned jobs")
+
         h = {'Authorization': 'Bearer %s' % self.args.token}
-        r = requests.get(self.args.api_server + '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibjobs' % self.namespace,
+        r = requests.get(self.args.api_server + '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibpipelineinvocations' % self.namespace,
                          headers=h, timeout=10)
         data = r.json()
 
@@ -482,10 +299,6 @@ def handle_orphaned_jobs(self):
             if 'metadata' not in j:
                 continue
 
-            if 'deletionTimestamp' in j['metadata']:
-                # Already marked for deletion
-                continue
-
             metadata = j['metadata']
             name = metadata['name']
             job_id = name
@@ -500,30 +313,82 @@ def handle_orphaned_jobs(self):
                 self.kube_delete_job(job_id)
                 continue
 
-            state = result[0][0]
-            if state in ('queued', 'scheduled', 'running'):
-                status = j.get('status', {}).get('status', None)
+            last_state = result[0][0]
 
-                if not status:
-                    continue
+            start_date = None
+            end_date = None
+            delete_job = False
+            current_state = 'scheduled'
+            message = None
 
-                if status == 'error':
-                    message = j['status']['message']
+            if j.get('status', None):
+                status = j['status']
+                s = status.get('state', "preparing")
+                message = status.get('message', None)
 
-                    if not message:
-                        message = "Internal Controller Error"
+                if s in ["preparing", "scheduling"]:
+                    current_state = 'scheduled'
 
-                    cursor = self.conn.cursor()
-                    cursor.execute("""
-                        UPDATE job SET state = 'error', message = %s, end_date = current_timestamp
-                        WHERE id = %s AND state IN ('scheduled', 'running', 'queued')
-                    """, (message, job_id,))
-                    cursor.close()
-                else:
-                    continue
+                if s in ["running", "finalizing"]:
+                    current_state = 'running'
 
-            self.logger.info('Deleting orphaned job %s', job_id)
-            self.kube_delete_job(job_id)
+                if s == "terminated":
+                    current_state = 'error'
+
+                    if 'stepStatuses' in status and status['stepStatuses']:
+                        stepStatus = status['stepStatuses'][-1]
+                        exit_code = stepStatus['State']['terminated']['exitCode']
+
+                        if exit_code == 0:
+                            current_state = 'finished'
+                        else:
+                            current_state = 'failure'
+                            message = stepStatus['State']['terminated']['message']
+
+                    delete_job = True
+
+                if s == "error":
+                    current_state = 'error'
+                    delete_job = True
+                    start_date = datetime.now()
+                    end_date = datetime.now()
+
+                start_date = status.get('startTime', start_date)
+                end_date = status.get('completionTime', end_date)
+
+            if last_state == current_state:
+                continue
+
+            cursor = self.conn.cursor()
+            cursor.execute("""
+                UPDATE job SET state = %s, start_date = %s, end_date = %s, message = %s
+                WHERE id = %s
+            """, (current_state, start_date, end_date, message, job_id))
+            cursor.close()
+
+            if delete_job:
+                # collect console output
+                cursor = self.conn.cursor()
+                cursor.execute("""
+                    SELECT output FROM console WHERE job_id = %s
+                    ORDER BY date
+                """, [job_id])
+                lines = cursor.fetchall()
+                cursor.close()
+
+                output = ""
+                for l in lines:
+                    output += l[0]
+
+                cursor = self.conn.cursor()
+                cursor.execute("""
+                    UPDATE job SET console = %s WHERE id = %s;
+                    DELETE FROM console WHERE job_id = %s;
+                """, [output, job_id, job_id])
+                cursor.close()
+
+                self.logger.info('Deleting job %s', job_id)
+                self.kube_delete_job(job_id)
 
     def update_cluster_state(self):
         cluster_name = os.environ['INFRABOX_CLUSTER_NAME']
@@ -582,7 +447,6 @@ def handle(self):
                 self.handle_timeouts()
                 self.handle_aborts()
                 self.handle_orphaned_jobs()
-                self.handle_orphaned_namespaces()
             except Exception as e:
                 self.logger.exception(e)
 
@@ -594,6 +458,8 @@
             self.schedule()
 
     def run(self):
+        self.logger.info("Starting scheduler")
+
         while True:
            self.handle()
            time.sleep(2)
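handle_orphaned_jobs now derives the database state purely from the CR's status. Factored out for readability, the mapping implemented above looks like this (a sketch; field names follow the CR as used in the diff, and the error branch falls back to the current time when the CR carries no timestamps):

    def map_invocation_status(status):
        # Returns (db_state, delete_job) for one ibpipelineinvocation status.
        s = status.get('state', 'preparing')

        if s in ('preparing', 'scheduling'):
            return 'scheduled', False

        if s in ('running', 'finalizing'):
            return 'running', False

        if s == 'terminated':
            steps = status.get('stepStatuses') or []
            if steps:
                exit_code = steps[-1]['State']['terminated']['exitCode']
                return ('finished' if exit_code == 0 else 'failure'), True
            return 'error', False

        if s == 'error':
            return 'error', True

        return 'scheduled', False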
@@ -619,14 +485,6 @@ def main():
     get_env('INFRABOX_ROOT_URL')
     get_env('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES')
     get_env('INFRABOX_GENERAL_WORKER_NAMESPACE')
-    get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE')
-    get_env('INFRABOX_JOB_MOUNT_DOCKER_SOCKET')
-    get_env('INFRABOX_JOB_SECURITY_CONTEXT_CAPABILITIES_ENABLED')
-
-    if get_env('INFRABOX_GERRIT_ENABLED') == 'true':
-        get_env('INFRABOX_GERRIT_USERNAME')
-        get_env('INFRABOX_GERRIT_HOSTNAME')
-        get_env('INFRABOX_GERRIT_PORT')
 
     # try to read from filesystem
     with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as f:
diff --git a/src/services/gcp/Gopkg.lock b/src/services/gcp/Gopkg.lock
index c5eb7c55c..9d93a679d 100644
--- a/src/services/gcp/Gopkg.lock
+++ b/src/services/gcp/Gopkg.lock
@@ -180,7 +180,6 @@
     "pkg/sdk/action",
     "pkg/sdk/handler",
     "pkg/sdk/informer",
-    "pkg/sdk/query",
     "pkg/sdk/types",
     "pkg/util/k8sutil",
     "version"
@@ -199,6 +198,12 @@
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"
 
+[[projects]]
+  name = "github.com/satori/go.uuid"
+  packages = ["."]
+  revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
+  version = "v1.2.0"
+
 [[projects]]
   name = "github.com/sirupsen/logrus"
   packages = ["."]
@@ -420,6 +425,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "85f853467422a2f028a4ff971e358eb699389795cc9676ccca6ec415851ceedb"
+  inputs-digest = "ece2853603b3b547d6bda2e289470180fc9cb2a57080755b46a6850d71339310"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/src/services/gcp/README.md b/src/services/gcp/README.md
index 8c106214b..fa5b3a5e2 100644
--- a/src/services/gcp/README.md
+++ b/src/services/gcp/README.md
@@ -21,14 +21,14 @@ The InfraBox GCP Service can be used to dynamically provision a Kubernetes Clust
         "name": "my-cluster"
     },
     "spec": {
-        "diskSize": "100",
+        "diskSize": 100,
         "machineType": "n1-standard-1",
-        "enableNetworkPolicy": "false",
-        "numNodes": "1",
-        "preemptible": "true",
-        "enableAutoscaling": "false",
-        "maxNodes": "1",
-        "minNodes": "1",
+        "enableNetworkPolicy": false,
+        "numNodes": 1,
+        "preemptible": true,
+        "enableAutoscaling": false,
+        "maxNodes": 1,
+        "minNodes": 1,
         "zone": "us-east1-b"
     }
 }]
diff --git a/src/services/gcp/deploy/operator.yaml b/src/services/gcp/deploy/operator.yaml
new file mode 100644
index 000000000..239dcd778
--- /dev/null
+++ b/src/services/gcp/deploy/operator.yaml
@@ -0,0 +1,25 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: gcp
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: gcp
+  template:
+    metadata:
+      labels:
+        name: gcp
+    spec:
+      containers:
+      - name: gcp
+        image: 192.168.1.31:5000/infrabox/service-gcp
+        command:
+        - gcp
+        imagePullPolicy: Always
+        env:
+        - name: WATCH_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
diff --git a/src/services/gcp/infrabox-service-gcp/templates/crd.yaml b/src/services/gcp/infrabox-service-gcp/templates/crd.yaml
index 70857b0f4..77f7e0078 100644
--- a/src/services/gcp/infrabox-service-gcp/templates/crd.yaml
+++ b/src/services/gcp/infrabox-service-gcp/templates/crd.yaml
@@ -9,3 +9,35 @@ spec:
     kind: GKECluster
     plural: gkeclusters
   scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      required:
+        - spec
+      properties:
+        spec:
+          required:
+            - zone
+          properties:
+            diskSize:
+              type: integer
+              minimum: 10
+            machineType:
+              type: string
+            enableNetworkPolicy:
+              type: boolean
+            numNodes:
+              type: integer
+              minimum: 1
+            preemptible:
+              type: boolean
+            enableAutoscaling:
+              type: boolean
+            maxNodes:
+              type: integer
+            minNodes:
+              type: integer
+              minimum: 1
+            zone:
+              type: string
+            clusterVersion:
+              type: string
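Because the CRD now ships an openAPIV3Schema, the API server itself rejects a GKECluster spec that still uses the old string-typed fields; only zone is required, and diskSize must be an integer of at least 10. Side by side, as plain data (mirroring the README change above):

    # Old style -- every value a string; now rejected by the CRD validation:
    old_spec = {'diskSize': '100', 'numNodes': '1', 'preemptible': 'true',
                'zone': 'us-east1-b'}

    # New style -- typed values, as in the updated README example:
    new_spec = {'diskSize': 100, 'numNodes': 1, 'preemptible': True,
                'zone': 'us-east1-b'}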
diff --git a/src/services/gcp/pkg/apis/gcp/v1alpha1/types.go b/src/services/gcp/pkg/apis/gcp/v1alpha1/types.go
index 8b3b6fa8d..e556ffb4f 100644
--- a/src/services/gcp/pkg/apis/gcp/v1alpha1/types.go
+++ b/src/services/gcp/pkg/apis/gcp/v1alpha1/types.go
@@ -16,20 +16,22 @@ type GKECluster struct {
 }
 
 type GKEClusterSpec struct {
-	DiskSize            string `json:"diskSize,omitempty"`
-	MachineType         string `json:"machineType,omitempty"`
-	EnableNetworkPolicy string `json:"enableNetworkPolicy,omitempty"`
-	NumNodes            string `json:"numNodes,omitempty"`
-	Preemptible         string `json:"preemptible,omitempty"`
-	EnableAutoscaling   string `json:"enableAutoscaling,omitempty"`
-	MaxNodes            string `json:"maxNodes,omitempty"`
-	MinNodes            string `json:"minNodes,omitempty"`
-	Zone                string `json:"zone"`
+	DiskSize            int32  `json:"diskSize,omitempty"`
+	MachineType         string `json:"machineType,omitempty"`
+	EnableNetworkPolicy bool   `json:"enableNetworkPolicy,omitempty"`
+	NumNodes            int32  `json:"numNodes,omitempty"`
+	Preemptible         bool   `json:"preemptible,omitempty"`
+	EnableAutoscaling   bool   `json:"enableAutoscaling,omitempty"`
+	MaxNodes            int32  `json:"maxNodes,omitempty"`
+	MinNodes            int32  `json:"minNodes,omitempty"`
+	ClusterVersion      string `json:"clusterVersion,omitempty"`
+	Zone                string `json:"zone"`
 }
 
 type GKEClusterStatus struct {
-	Status  string `json:"status"`
-	Message string `json:"message"`
+	Status      string `json:"status,omitempty"`
+	Message     string `json:"message,omitempty"`
+	ClusterName string `json:"clusterName,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/src/services/gcp/pkg/stub/handler.go b/src/services/gcp/pkg/stub/handler.go
index 3a02744a5..d3e2dc2ef 100644
--- a/src/services/gcp/pkg/stub/handler.go
+++ b/src/services/gcp/pkg/stub/handler.go
@@ -1,19 +1,44 @@
 package stub
 
 import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
 	b64 "encoding/base64"
 	"encoding/json"
+	"fmt"
 	"github.com/sap/infrabox/src/services/gcp/pkg/apis/gcp/v1alpha1"
+	"github.com/satori/go.uuid"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
 	"os/exec"
+	"strconv"
+	"strings"
+
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/discovery/cached"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 
 	"github.com/operator-framework/operator-sdk/pkg/sdk/action"
 	"github.com/operator-framework/operator-sdk/pkg/sdk/handler"
 	"github.com/operator-framework/operator-sdk/pkg/sdk/types"
+	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
+
 	"github.com/sirupsen/logrus"
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+
+	appsv1 "k8s.io/api/apps/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
 type MasterAuth struct {
"pending" - err := action.Update(cr) - if err != nil { - logrus.Errorf("Failed to set finalizers: %v", err) - return nil, err - } - } + finalizers := cr.GetFinalizers() + if len(finalizers) == 0 { + cr.SetFinalizers([]string{"gcp.service.infrabox.net"}) + cr.Status.Status = "pending" + u := uuid.NewV4() + + cr.Status.ClusterName = "ib-" + u.String() + err := action.Update(cr) + if err != nil { + log.Errorf("Failed to set finalizers: %v", err) + return nil, err + } + } // Get the GKE Cluster - gkecluster, err := getRemoteCluster(cr.Name) + gkecluster, err := getRemoteCluster(cr.Status.ClusterName, log) if err != nil && !errors.IsNotFound(err) { - logrus.Errorf("Could not get GKE Cluster: %v", err) - return nil, err + log.Errorf("Could not get GKE Cluster: %v", err) + return nil, err } if gkecluster == nil { - name := "ib-" + cr.Name args := []string{"container", "clusters", - "create", name, "--async", "--enable-autorepair"} - - args = append(args, "--zone") - if cr.Spec.Zone != "" { - args = append(args, cr.Spec.Zone) - } else { - args = append(args, "us-east1-b") - } + "create", cr.Status.ClusterName, + "--async", + "--enable-autorepair", + "--zone", cr.Spec.Zone, + } - if cr.Spec.DiskSize != "" { + if cr.Spec.DiskSize != 0 { args = append(args, "--disk-size") - args = append(args, cr.Spec.DiskSize) + args = append(args, strconv.Itoa(int(cr.Spec.DiskSize))) } if cr.Spec.MachineType != "" { @@ -82,96 +106,124 @@ func syncGKECluster(cr *v1alpha1.GKECluster) (*v1alpha1.GKEClusterStatus, error) args = append(args, cr.Spec.MachineType) } - if cr.Spec.EnableNetworkPolicy == "true" { + if cr.Spec.EnableNetworkPolicy { args = append(args, "--enable-network-policy") } - if cr.Spec.NumNodes != "" { + if cr.Spec.NumNodes != 0 { args = append(args, "--num-nodes") - args = append(args, cr.Spec.NumNodes) + args = append(args, strconv.Itoa(int(cr.Spec.NumNodes))) } - if cr.Spec.Preemptible == "true" { + if cr.Spec.Preemptible { args = append(args, "--preemptible") } - if cr.Spec.EnableAutoscaling == "true" { + if cr.Spec.EnableAutoscaling { args = append(args, "--enable-autoscaling") - if cr.Spec.MaxNodes != "" { + if cr.Spec.MaxNodes != 0 { args = append(args, "--max-nodes") - args = append(args, cr.Spec.MaxNodes) + args = append(args, strconv.Itoa(int(cr.Spec.MaxNodes))) } - if cr.Spec.MinNodes != "" { + if cr.Spec.MinNodes != 0 { args = append(args, "--min-nodes") - args = append(args, cr.Spec.MinNodes) + args = append(args, strconv.Itoa(int(cr.Spec.MinNodes))) } } + if cr.Spec.ClusterVersion != "" { + // find out the exact cluster version + version, err := getExactClusterVersion(cr, log) + + if err != nil { + return nil, err + } + + args = append(args, "--cluster-version", version) + } + cmd := exec.Command("gcloud", args...) 
@@ -182,72 +234,110 @@ func deleteGKECluster(cr *v1alpha1.GKECluster) error {
 		},
 	}
 
-    err = action.Delete(&secret)
-    if err != nil && !errors.IsNotFound(err) {
-        logrus.Errorf("Failed to delete secret: %v", err)
-        return err
-    }
+	err = action.Delete(&secret)
+	if err != nil && !errors.IsNotFound(err) {
+		log.Errorf("Failed to delete secret: %v", err)
+		return err
+	}
 
-    cr.SetFinalizers([]string{})
-    err = action.Update(cr)
-    if err != nil {
-        logrus.Errorf("Failed to remove finalizers: %v", err)
-        return err
-    }
+	cr.SetFinalizers([]string{})
+	err = action.Update(cr)
+	if err != nil {
+		log.Errorf("Failed to remove finalizers: %v", err)
+		return err
+	}
 
-    err = action.Delete(cr)
-    if err != nil && !errors.IsNotFound(err) {
-        logrus.Errorf("Failed to delete cr: %v", err)
-        return err
-    }
+	err = action.Delete(cr)
+	if err != nil && !errors.IsNotFound(err) {
+		log.Errorf("Failed to delete cr: %v", err)
+		return err
+	}
 
-    return nil
+	return nil
 }
 
 func (h *Handler) Handle(ctx types.Context, event types.Event) error {
 	switch o := event.Object.(type) {
 	case *v1alpha1.GKECluster:
-        ns := o
-        if event.Deleted {
-            return nil
-        }
-
-        delTimestamp := ns.GetDeletionTimestamp()
-        if delTimestamp != nil {
-            return deleteGKECluster(ns)
-        } else {
-            status, err := syncGKECluster(ns)
-
-            if err != nil {
-                ns.Status.Status = "error"
-                ns.Status.Message = err.Error()
-                err = action.Update(ns)
-                return err
-            } else {
-                if ns.Status.Status != status.Status || ns.Status.Message != status.Message {
-                    ns.Status = *status
-                    err = action.Update(ns)
-                    return err
-                }
-            }
-        }
+		ns := o
+		if event.Deleted {
+			return nil
+		}
+
+		log := logrus.WithFields(logrus.Fields{
+			"namespace": ns.Namespace,
+			"name":      ns.Name,
+		})
+
+		delTimestamp := ns.GetDeletionTimestamp()
+		if delTimestamp != nil {
+			return deleteGKECluster(ns, log)
+		} else {
+			status, err := syncGKECluster(ns, log)
+
+			if err != nil {
+				ns.Status.Status = "error"
+				ns.Status.Message = err.Error()
+				err = action.Update(ns)
+				return err
+			} else {
+				if ns.Status.Status != status.Status || ns.Status.Message != status.Message {
+					ns.Status = *status
+					err = action.Update(ns)
+					return err
+				}
+			}
+		}
 	}
 	return nil
 }
 
 func getLabels(cr *v1alpha1.GKECluster) map[string]string {
-	return map[string]string {}
+	return map[string]string{}
+}
+
+type ServerConfig struct {
+	ValidMasterVersions []string `json:"validMasterVersions"`
+	ValidNodeVersions   []string `json:"validNodeVersions"`
+}
+
+func getExactClusterVersion(cr *v1alpha1.GKECluster, log *logrus.Entry) (string, error) {
+	cmd := exec.Command("gcloud", "container", "get-server-config",
+		"--format", "json",
+		"--zone", cr.Spec.Zone)
+
+	out, err := cmd.Output()
+
+	if err != nil {
+		log.Errorf("Could not get server config: %v", err)
+		return "", err
+	}
+
+	var config ServerConfig
+	err = json.Unmarshal(out, &config)
+
+	if err != nil {
+		log.Errorf("Could not parse cluster config: %v", err)
+		return "", err
+	}
+
+	for _, v := range config.ValidMasterVersions {
+		if strings.HasPrefix(v, cr.Spec.ClusterVersion) {
+			return v, nil
+		}
+	}
+
+	return "", fmt.Errorf("Could not find a valid cluster version match for %v", cr.Spec.ClusterVersion)
 }
 
-func getRemoteCluster(name string) (*RemoteCluster, error) {
+func getRemoteCluster(name string, log *logrus.Entry) (*RemoteCluster, error) {
 	cmd := exec.Command("gcloud", "container", "clusters", "list",
-		"--filter", "name=ib-"+name, "--format", "json")
+		"--filter", "name="+name, "--format", "json")
 
-	out, err := cmd.CombinedOutput()
+	out, err := cmd.Output()
 
 	if err != nil {
-		logrus.Errorf("Cloud not list clusters: %v", err)
-		logrus.Error(string(out))
+		log.Errorf("Could not list clusters: %v", err)
 		return nil, err
 	}
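getExactClusterVersion resolves a version prefix such as "1.9" from the spec to the newest concrete version GKE offers in that zone. The same lookup as a standalone Python sketch, mirroring the gcloud call and JSON field used above:

    import json
    import subprocess

    def exact_cluster_version(prefix, zone):
        out = subprocess.check_output(['gcloud', 'container', 'get-server-config',
                                       '--format', 'json', '--zone', zone])
        config = json.loads(out)

        # Pick the first advertised master version matching the requested prefix.
        for version in config['validMasterVersions']:
            if version.startswith(prefix):
                return version

        raise Exception('no valid cluster version matches %s' % prefix)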
remove finalizers: %v", err) - return err - } + cr.SetFinalizers([]string{}) + err = action.Update(cr) + if err != nil { + log.Errorf("Failed to remove finalizers: %v", err) + return err + } - err = action.Delete(cr) - if err != nil && !errors.IsNotFound(err) { - logrus.Errorf("Failed to delete cr: %v", err) - return err - } + err = action.Delete(cr) + if err != nil && !errors.IsNotFound(err) { + log.Errorf("Failed to delete cr: %v", err) + return err + } - return nil + return nil } func (h *Handler) Handle(ctx types.Context, event types.Event) error { switch o := event.Object.(type) { case *v1alpha1.GKECluster: - ns := o - if event.Deleted { - return nil - } - - delTimestamp := ns.GetDeletionTimestamp() - if delTimestamp != nil { - return deleteGKECluster(ns) - } else { - status, err := syncGKECluster(ns) - - if err != nil { - ns.Status.Status = "error" - ns.Status.Message = err.Error() - err = action.Update(ns) - return err - } else { - if ns.Status.Status != status.Status || ns.Status.Message != status.Message { - ns.Status = *status - err = action.Update(ns) - return err - } - } - } + ns := o + if event.Deleted { + return nil + } + + log := logrus.WithFields(logrus.Fields{ + "namespace": ns.Namespace, + "name": ns.Name, + }) + + delTimestamp := ns.GetDeletionTimestamp() + if delTimestamp != nil { + return deleteGKECluster(ns, log) + } else { + status, err := syncGKECluster(ns, log) + + if err != nil { + ns.Status.Status = "error" + ns.Status.Message = err.Error() + err = action.Update(ns) + return err + } else { + if ns.Status.Status != status.Status || ns.Status.Message != status.Message { + ns.Status = *status + err = action.Update(ns) + return err + } + } + } } return nil } func getLabels(cr *v1alpha1.GKECluster) map[string]string { - return map[string]string {} + return map[string]string{} +} + +type ServerConfig struct { + ValidMasterVersions []string `json:"validMasterVersions"` + ValidNodeVersions []string `json:"validNodeVersions"` +} + +func getExactClusterVersion(cr *v1alpha1.GKECluster, log *logrus.Entry) (string, error) { + cmd := exec.Command("gcloud", "container", "get-server-config", + "--format", "json", + "--zone", cr.Spec.Zone) + + out, err := cmd.Output() + + if err != nil { + log.Errorf("Could not get server config: %v", err) + return "", err + } + + var config ServerConfig + err = json.Unmarshal(out, &config) + + if err != nil { + log.Errorf("Could not parse cluster config: %v", err) + return "", err + } + + for _, v := range config.ValidMasterVersions { + if strings.HasPrefix(v, cr.Spec.ClusterVersion) { + return v, nil + } + } + + return "", fmt.Errorf("Could not find a valid cluster version match for %v", cr.Spec.ClusterVersion) } -func getRemoteCluster(name string) (*RemoteCluster, error) { +func getRemoteCluster(name string, log *logrus.Entry) (*RemoteCluster, error) { cmd := exec.Command("gcloud", "container", "clusters", "list", - "--filter", "name=ib-"+name, "--format", "json") + "--filter", "name="+name, "--format", "json") - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { - logrus.Errorf("Cloud not list clusters: %v", err) - logrus.Error(string(out)) + log.Errorf("Could not list clusters: %v", err) return nil, err } @@ -255,7 +345,7 @@ func getRemoteCluster(name string) (*RemoteCluster, error) { err = json.Unmarshal(out, &gkeclusters) if err != nil { - logrus.Errorf("Cloud not parse cluster list: %v", err) + log.Errorf("Could not parse cluster list: %v", err) return nil, err } @@ -266,13 +356,6 @@ func getRemoteCluster(name 
@@ -308,4 +391,460 @@ func newSecret(cluster *v1alpha1.GKECluster, gke *RemoteCluster) *v1.Secret {
 	}
 }
 
+func doCollectorRequest(cluster *RemoteCluster, log *logrus.Entry, endpoint string) (*[]byte, error) {
+	caCrt, _ := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClusterCaCertificate)
+
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCrt)
+
+	tlsConfig := &tls.Config{
+		RootCAs: caCertPool,
+	}
+	tlsConfig.BuildNameToCertificate()
+	transport := &http.Transport{TLSClientConfig: tlsConfig}
+	client := &http.Client{Transport: transport}
+
+	req, err := http.NewRequest("GET", "https://"+cluster.Endpoint+"/api/v1/namespaces/infrabox-collector/services/infrabox-collector-api:80/proxy"+endpoint, nil)
+	if err != nil {
+		log.Errorf("Failed to create new request: %v", err)
+		return nil, err
+	}
+
+	req.SetBasicAuth(cluster.MasterAuth.Username, cluster.MasterAuth.Password)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		log.Errorf("Failed to GET remote pod list: %v", err)
+		return nil, err
+	}
+
+	bodyText, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Errorf("Failed to read response body: %v", err)
+		return nil, err
+	}
+
+	return &bodyText, nil
+}
+
+func uploadToArchive(cr *v1alpha1.GKECluster, log *logrus.Entry, data *[]byte, filename string) error {
+	annotations := cr.GetAnnotations()
+	root_url, _ := annotations["infrabox.net/root-url"]
+	job_token, _ := annotations["infrabox.net/job-token"]
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	part, err := writer.CreateFormFile(filename, filename)
+	if err != nil {
+		log.Warningf("Failed to create form file: %v", err)
+		return err
+	}
+
+	part.Write(*data)
+	err = writer.Close()
+	if err != nil {
+		log.Warningf("Failed to close writer: %v", err)
+		return err
+	}
+
+	req, err := http.NewRequest("POST", root_url+"/api/job/archive", body)
+
+	if err != nil {
+		log.Warningf("Failed to create request: %v", err)
+		return err
+	}
+
+	req.Header.Set("Content-Type", writer.FormDataContentType())
+	req.Header.Set("Authorization", "token "+job_token)
+	client := &http.Client{}
+	response, err := client.Do(req)
+
+	if err != nil {
+		log.Warningf("Failed to execute request: %v", err)
+		return err
+	}
+
+	bodyText, err := ioutil.ReadAll(response.Body)
+	log.Info(string(bodyText))
+
+	return nil
+}
+
+type CollectedPod struct {
+	NamespaceID string   `json:"namespace_id"`
+	PodID       string   `json:"pod_id"`
+	Pod         string   `json:"pod_name"`
+	Containers  []string `json:"containers"`
+	Namespace   string   `json:"namespace_name"`
+}
+
+func retrieveLogs(cr *v1alpha1.GKECluster, cluster *RemoteCluster, log *logrus.Entry) {
+	log.Info("Collecting data from remote cluster")
+
+	annotations := cr.GetAnnotations()
+	_, ok := annotations["infrabox.net/root-url"]
+	if !ok {
+		log.Warning("infrabox.net/root-url not set, not retrieving logs")
+		return
+	}
+
+	_, ok = annotations["infrabox.net/job-id"]
+	if !ok {
+		log.Warning("infrabox.net/job-id not set, not retrieving logs")
+		return
+	}
+
+	_, ok = annotations["infrabox.net/job-token"]
+	if !ok {
log.Warning("infrabox.net/job-token not set, not retrieving logs") + return + } + + var pods []CollectedPod + data, err := doCollectorRequest(cluster, log, "/api/pods") + + if err != nil { + log.Errorf("Failed to get collected pod list: %v", err) + return + } + + log.Info(string(*data)) + + err = json.Unmarshal(*data, &pods) + if err != nil { + log.Errorf("Failed to collected pod list: %v", err) + return + } + + for _, pod := range pods { + for _, container := range pod.Containers { + log.Info("Collecting logs for pod: ", pod.PodID) + data, err := doCollectorRequest(cluster, log, "/api/pods/"+pod.PodID+"/log/"+container) + + if err != nil { + log.Warningf("Failed to get collected pod logs: %v", err) + continue + } + + filename := "pod_" + pod.Namespace + "_" + pod.Pod + "_" + pod.PodID + ".txt" + err = uploadToArchive(cr, log, data, filename) + if err != nil { + log.Warningf("Failed to upload log to archive: %v", err) + continue + } + } + } +} + +func injectCollector(cluster *RemoteCluster, log *logrus.Entry) error { + client, err := newRemoteClusterSDK(cluster, log) + + if err != nil { + log.Errorf("Failed to create remote cluster client: %v", err) + return err + } + + err = client.Create(newCollectorNamespace(), log) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to create collector deployment: %v", err) + return err + } + + err = client.Create(newCollectorCRB(), log) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to create collector crb: %v", err) + return err + } + + err = client.Create(newCollectorDeployment(), log) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to create collector deployment: %v", err) + return err + } + + err = client.Create(newCollectorService(), log) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to create collector service: %v", err) + return err + } + + err = client.Create(newCollectorDaemonSet(), log) + if err != nil && !errors.IsAlreadyExists(err) { + log.Errorf("Failed to create collector daemon set: %v", err) + return err + } + + return nil +} + +type RemoteClusterSDK struct { + kubeConfig *rest.Config + cluster *RemoteCluster + clientPool dynamic.ClientPool + restMapper *discovery.DeferredDiscoveryRESTMapper +} + +func (r *RemoteClusterSDK) Create(object types.Object, log *logrus.Entry) (err error) { + _, namespace, err := k8sutil.GetNameAndNamespace(object) + + if err != nil { + log.Errorf("Failed to get namespace: %v", err) + return err + } + + gvk := object.GetObjectKind().GroupVersionKind() + apiVersion, kind := gvk.ToAPIVersionAndKind() + + resourceClient, _, err := r.getRemoteResourceClient(apiVersion, kind, namespace) + if err != nil { + return fmt.Errorf("failed to get resource client: %v", err) + } + + unstructObj := k8sutil.UnstructuredFromRuntimeObject(object) + unstructObj, err = resourceClient.Create(unstructObj) + if err != nil { + log.Errorf("Failed to create object: %v", err) + return err + } + + // Update the arg object with the result + err = k8sutil.UnstructuredIntoRuntimeObject(unstructObj, object) + if err != nil { + return fmt.Errorf("failed to unmarshal the retrieved data: %v", err) + } + + return nil +} + +func newRemoteClusterSDK(cluster *RemoteCluster, log *logrus.Entry) (*RemoteClusterSDK, error) { + caCrt, err := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClusterCaCertificate) + clientKey, _ := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClientKey) + clientCrt, _ := 
+
+func newRemoteClusterSDK(cluster *RemoteCluster, log *logrus.Entry) (*RemoteClusterSDK, error) {
+	caCrt, err := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClusterCaCertificate)
+	clientKey, _ := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClientKey)
+	clientCrt, _ := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClientCertificate)
+
+	if err != nil {
+		return nil, err
+	}
+
+	tlsClientConfig := rest.TLSClientConfig{}
+	tlsClientConfig.CAData = caCrt
+	tlsClientConfig.CertData = clientCrt
+	tlsClientConfig.KeyData = clientKey
+
+	kubeConfig := &rest.Config{
+		Host:            cluster.Endpoint,
+		TLSClientConfig: tlsClientConfig,
+		Username:        cluster.MasterAuth.Username,
+		Password:        cluster.MasterAuth.Password,
+	}
+
+	kubeClient := kubernetes.NewForConfigOrDie(kubeConfig)
+
+	cachedDiscoveryClient := cached.NewMemCacheClient(kubeClient.Discovery())
+	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient, meta.InterfacesForUnstructured)
+	restMapper.Reset()
+	kubeConfig.ContentConfig = dynamic.ContentConfig()
+	clientPool := dynamic.NewClientPool(kubeConfig, restMapper, dynamic.LegacyAPIPathResolverFunc)
+
+	return &RemoteClusterSDK{
+		kubeConfig: kubeConfig,
+		clientPool: clientPool,
+		cluster:    cluster,
+		restMapper: restMapper,
+	}, nil
+}
+
+func apiResource(gvk schema.GroupVersionKind, restMapper *discovery.DeferredDiscoveryRESTMapper) (*metav1.APIResource, error) {
+	mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get the resource REST mapping for GroupVersionKind(%s): %v", gvk.String(), err)
+	}
+	resource := &metav1.APIResource{
+		Name:       mapping.Resource,
+		Namespaced: mapping.Scope == meta.RESTScopeNamespace,
+		Kind:       gvk.Kind,
+	}
+	return resource, nil
+}
+
+func (r *RemoteClusterSDK) getRemoteResourceClient(apiVersion, kind, namespace string) (dynamic.ResourceInterface, string, error) {
+	gv, err := schema.ParseGroupVersion(apiVersion)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed to parse apiVersion: %v", err)
+	}
+
+	gvk := schema.GroupVersionKind{
+		Group:   gv.Group,
+		Version: gv.Version,
+		Kind:    kind,
+	}
+
+	client, err := r.clientPool.ClientForGroupVersionKind(gvk)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed to get client for GroupVersionKind(%s): %v", gvk.String(), err)
+	}
+	resource, err := apiResource(gvk, r.restMapper)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed to get resource type: %v", err)
+	}
+	pluralName := resource.Name
+	resourceClient := client.Resource(resource, namespace)
+	return resourceClient, pluralName, nil
+}
+
+func newCollectorNamespace() *v1.Namespace {
+	return &v1.Namespace{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Namespace",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "infrabox-collector",
+		},
+	}
+}
+
+func newCollectorCRB() *rbacv1.ClusterRoleBinding {
+	return &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRoleBinding",
+			APIVersion: "rbac.authorization.k8s.io/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "infrabox-collector-crb",
+			Namespace: "infrabox-collector",
+		},
+		Subjects: []rbacv1.Subject{{
+			Kind:      "ServiceAccount",
+			Name:      "default",
+			Namespace: "infrabox-collector",
+		}},
+		RoleRef: rbacv1.RoleRef{
+			Kind:     "ClusterRole",
+			Name:     "cluster-admin",
+			APIGroup: "rbac.authorization.k8s.io",
+		},
+	}
+}
+
+func newCollectorService() *v1.Service {
+	return &v1.Service{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Service",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "infrabox-collector-api",
+			Namespace: "infrabox-collector",
+		},
+		Spec: v1.ServiceSpec{
+			Ports: []v1.ServicePort{{
+				Name:       "http",
+				Port:       80,
+				TargetPort: intstr.FromInt(8080),
+			}},
+			Selector: map[string]string{
+				"app": "api.collector.infrabox.net",
+			},
+		},
+	}
+}
+
+func newCollectorDeployment() *appsv1.Deployment {
+	var replicas int32 = 1
+	return &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Deployment",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "infrabox-collector-api",
+			Namespace: "infrabox-collector",
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": "api.collector.infrabox.net",
+				},
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "api.collector.infrabox.net",
+					},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{{
+						Name:  "api",
+						Image: "quay.io/infrabox/collector-api",
+					}},
+				},
+			},
+		},
+	}
+}
+
+func newCollectorDaemonSet() *appsv1.DaemonSet {
+	return &appsv1.DaemonSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "DaemonSet",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "infrabox-collector-fluentd",
+			Namespace: "infrabox-collector",
+		},
+		Spec: appsv1.DaemonSetSpec{
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "fluentd.collector.infrabox.net",
+					},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{{
+						Name:  "fluentd",
+						Image: "quay.io/infrabox/collector-fluentd",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								"memory": resource.MustParse("200Mi"),
+							},
+							Requests: v1.ResourceList{
+								"cpu":    resource.MustParse("100m"),
+								"memory": resource.MustParse("100Mi"),
+							},
+						},
+						VolumeMounts: []v1.VolumeMount{{
+							Name:      "varlog",
+							MountPath: "/var/log",
+						}, {
+							Name:      "varlibdockercontainers",
+							MountPath: "/var/lib/docker/containers",
+							ReadOnly:  true,
+						}},
+						Env: []v1.EnvVar{{
+							Name:  "INFRABOX_COLLECTOR_ENDPOINT",
+							Value: "http://infrabox-collector-api.infrabox-collector/api/log",
+						}},
+					}},
+					Volumes: []v1.Volume{{
+						Name: "varlibdockercontainers",
+						VolumeSource: v1.VolumeSource{
+							HostPath: &v1.HostPathVolumeSource{
+								Path: "/var/lib/docker/containers",
+							},
+						},
+					}, {
+						Name: "varlog",
+						VolumeSource: v1.VolumeSource{
+							HostPath: &v1.HostPathVolumeSource{
+								Path: "/var/log",
+							},
+						},
+					}},
+				},
+			},
+		},
+	}
+}
diff --git a/src/services/gcp/tmp/build/Dockerfile b/src/services/gcp/tmp/build/Dockerfile
index ed9a91070..ada0a64d4 100644
--- a/src/services/gcp/tmp/build/Dockerfile
+++ b/src/services/gcp/tmp/build/Dockerfile
@@ -1,6 +1,29 @@
-FROM alpine:3.6
+FROM alpine:3.7
+ENV CLOUD_SDK_VERSION 198.0.0
 
-RUN adduser -D namespace
-USER namespace
+ENV PATH /google-cloud-sdk/bin:$PATH
 
-ADD tmp/_output/bin/namespace /usr/local/bin/namespace
+RUN apk --no-cache add \
+        curl \
+        python \
+        py-crcmod \
+        bash \
+        libc6-compat \
+        openssh-client \
+        git \
+    && curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \
+    tar xzf google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \
+    rm google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \
+    ln -s /lib /lib64 && \
+    gcloud config set core/disable_usage_reporting true && \
+    gcloud config set component_manager/disable_update_check true && \
+    gcloud config set metrics/environment github_docker_image && \
+    gcloud config set container/use_v1_api false && \
+    gcloud config set container/new_scopes_behavior true && \
+    gcloud components install kubectl && \
+    gcloud --version
+
+WORKDIR /app
+ADD tmp/_output/bin/gcp /app/gcp
+
+ENTRYPOINT ./gcp
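Taken together, the collector pieces work like this: the operator injects the collector into a freshly created cluster, and right before deletion it pulls the collected pod logs through the API-server service proxy and archives them using the annotations the scheduler attached to the service. The round-trip, sketched in Python (URL layout, JSON field names, and the basic-auth credentials follow the Go code above; parameter names are illustrative):

    import requests

    def collect_and_archive(endpoint, user, password, ca_file, root_url, job_token):
        # The collector API is reached through the remote API server's
        # service proxy, authenticated with the cluster's masterAuth.
        proxy = ('https://%s/api/v1/namespaces/infrabox-collector'
                 '/services/infrabox-collector-api:80/proxy' % endpoint)
        auth = (user, password)

        pods = requests.get(proxy + '/api/pods', auth=auth, verify=ca_file).json()

        for pod in pods:
            for container in pod['containers']:
                log = requests.get(proxy + '/api/pods/%s/log/%s'
                                   % (pod['pod_id'], container),
                                   auth=auth, verify=ca_file)

                name = 'pod_%s_%s_%s.txt' % (pod['namespace_name'],
                                             pod['pod_name'], pod['pod_id'])

                # Upload as multipart form data, authorized by the job token
                # taken from the service annotations.
                requests.post(root_url + '/api/job/archive',
                              headers={'Authorization': 'token ' + job_token},
                              files={name: (name, log.content)})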