diff --git a/Development.adoc b/Development.adoc index 81dc823..9323ea5 100644 --- a/Development.adoc +++ b/Development.adoc @@ -55,6 +55,7 @@ odo push + ---- ansible-playbook \ +-e ansible_python_interpreter='{{ ansible_playbook_python }}' \ -e poolboy_domain=poolboy.dev.local \ -e poolboy_namespace=poolboy-dev \ -e poolboy_service_account=default \ @@ -123,6 +124,7 @@ helm template helm \ + ---- ansible-playbook \ +-e ansible_python_interpreter='{{ ansible_playbook_python }}' \ -e poolboy_domain=poolboy.dev.local \ -e poolboy_namespace=poolboy-dev \ -e poolboy_service_account=poolboy-dev \ diff --git a/README.adoc b/README.adoc index 9bf0e64..6638f08 100644 --- a/README.adoc +++ b/README.adoc @@ -23,7 +23,7 @@ image::docs/diagram.png[Poolboy Diagram,500,400] . User updates an existing ResourceClaim . Each resource template in the ResourceClaim is checked for validity against the OpenAPIv3 schema in the ResourceProvider. -. A JSON Patch for each resource template in the ResourceHandle is generated and then the JSON Patch is filtered according to each ResourceProvider's `spec.updateFilters`. +. A JSON Patch for each resource template in the ResourceHandle is generated and then the JSON Patch is filtered according to each ResourceProvider's `spec.updateFilters`. . Updates are applied to the ResourceHandle. === ResourceHandle Creation or Update diff --git a/helm/crds/resourceclaims.yaml b/helm/crds/resourceclaims.yaml index a1c3c74..18c43dc 100644 --- a/helm/crds/resourceclaims.yaml +++ b/helm/crds/resourceclaims.yaml @@ -89,7 +89,7 @@ spec: provider: description: >- ResourceProvider specification used to manage this claim. - Mutually exclusive with provider. + Mutually exclusive with resources. type: object required: - name @@ -100,7 +100,7 @@ spec: type: string parameterValues: description: >- - Parameter values used with this ResourcProvider. + Parameter values used with the ResourceProvider. 
type: object x-kubernetes-preserve-unknown-fields: true resources: @@ -163,6 +163,9 @@ spec: diffBase: description: Kopf diffbase type: string + healthy: + description: Health state as determined by check from ResourceProviders of resources. + type: boolean kopf: description: Kopf status type: object @@ -234,11 +237,14 @@ spec: namespace: type: string resources: - description: Status of resources managed for this claim + description: Status of resources managed for this ResourceClaim type: array items: type: object properties: + healthy: + description: Health state as determined by check from ResourceProvider. + type: boolean name: description: >- A name used to identify the resource. @@ -255,6 +261,26 @@ spec: type: string namespace: type: string + ready: + description: Readiness state as determined by check from ResourceProvider. + type: boolean + reference: + description: >- + Reference to managed resource. + type: object + required: + - apiVersion + - kind + - name + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + namespace: + type: string state: description: Resource state synchronized from managed resource type: object @@ -270,6 +296,9 @@ spec: - Linked ResourceProvider - Resource Definition type: string + ready: + description: Readiness state as determined by check from ResourceProviders of resources. + type: boolean summary: description: >- Status summary from current resources state, generated from ResourceProvider configuration. 
diff --git a/helm/crds/resourcehandles.yaml b/helm/crds/resourcehandles.yaml index 9d75994..27537b4 100644 --- a/helm/crds/resourcehandles.yaml +++ b/helm/crds/resourcehandles.yaml @@ -16,9 +16,18 @@ spec: subresources: status: {} additionalPrinterColumns: + - name: Provider + type: string + jsonPath: .spec.provider.name - name: Pool type: string jsonPath: .spec.resourcePool.name + - name: Healthy + type: boolean + jsonPath: .status.healthy + - name: Ready + type: boolean + jsonPath: .status.ready - name: Claim Namespace type: string jsonPath: .spec.resourceClaim.namespace @@ -56,8 +65,6 @@ spec: spec: description: ResourceHandle specification type: object - required: - - resources properties: lifespan: description: >- @@ -90,6 +97,24 @@ spec: Ex: "3d" for 3 days. This value may be a template string. type: string + provider: + description: >- + ResourceProvider specification used to generate resources rather than + explicitly list spec. + Mutually exclusive with resources. + type: object + required: + - name + properties: + name: + description: >- + ResourceProvider name. + type: string + parameterValues: + description: >- + Parameter values used with the ResourceProvider to generate resources list. + type: object + x-kubernetes-preserve-unknown-fields: true resourceClaim: description: >- ResourceClaim reference for claim matched to this ResourceHandle when the handle has been claimed. @@ -159,7 +184,8 @@ spec: type: string reference: description: >- - Reference to managed resource + Reference to managed resource. + (Deprecated in spec in favor of status). type: object required: - apiVersion @@ -180,14 +206,6 @@ spec: claim's template is used to manage the handle template. type: object x-kubernetes-preserve-unknown-fields: true - waitingFor: - description: >- - Indication indicating that resource creation is blocked waiting on a condition. 
- enum: - - ResourceClaim - - Linked ResourceProvider - - Resource Definition - type: string vars: description: >- Variables to use when evaluating validation checks and templates. @@ -202,7 +220,59 @@ spec: diffBase: description: Kopf diffbase type: string + healthy: + description: Health state as determined by check from ResourceProviders of resources. + type: boolean kopf: description: Kopf status type: object x-kubernetes-preserve-unknown-fields: true + ready: + description: Readiness state as determined by check from ResourceProviders of resources. + type: boolean + resources: + description: Status of resources managed by this ResourceHandle + type: array + items: + type: object + properties: + healthy: + description: Health state as determined by check from ResourceProvider. + type: boolean + name: + description: >- + A name used to identify the resource. + type: string + ready: + description: Readiness state as determined by check from ResourceProvider. + type: boolean + reference: + description: >- + Reference to managed resource. + type: object + required: + - apiVersion + - kind + - name + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + waitingFor: + description: >- + Indication indicating that resource creation is blocked waiting on a condition. + enum: + - ResourceClaim + - Linked ResourceProvider + - Resource Definition + type: string + summary: + description: >- + Status summary from current resources state, generated from ResourceProvider configuration. 
+ type: object + x-kubernetes-preserve-unknown-fields: true diff --git a/helm/crds/resourcepools.yaml b/helm/crds/resourcepools.yaml index 5df04e3..536d77c 100644 --- a/helm/crds/resourcepools.yaml +++ b/helm/crds/resourcepools.yaml @@ -16,9 +16,18 @@ spec: subresources: status: {} additionalPrinterColumns: + - name: ResourceProvider + jsonPath: .spec.provider.name + type: string - name: Min type: integer jsonPath: .spec.minAvailable + - name: Available + type: integer + jsonPath: .status.resourceHandleCount.available + - name: Ready + type: integer + jsonPath: .status.resourceHandleCount.ready - name: Age type: date jsonPath: .metadata.creationTimestamp @@ -47,9 +56,11 @@ spec: spec: description: ResourcePool specification type: object - required: - - resources properties: + deleteUnhealthyResourceHandles: + description: >- + If set then any unbound ResourceHandle that fails health check will be automatically deleted. + type: boolean lifespan: description: >- Lifespan configuration for ResourceHandle provisioned by the ResourcePool. @@ -83,12 +94,35 @@ spec: Ex: "3d" for 3 days. type: string pattern: ^[0-9]+[smhd]$ + maxUnready: + description: >- + Maximum number of resource handles that do not pass readiness check. + type: integer + minimum: 0 minAvailable: description: >- Minimum number of unclaimed resource handles to maintain for the ResourcePool. type: integer minimum: 0 + provider: + description: >- + ResourceProvider specification used to generate resources rather than + explicitly list spec. + Mutually exclusive with resources. + type: object + required: + - name + properties: + name: + description: >- + ResourceProvider name. + type: string + parameterValues: + description: >- + Parameter values used with the ResourceProvider to generate resources list. + type: object + x-kubernetes-preserve-unknown-fields: true resources: description: >- Resources description to apply to ResourceHandles for the pool. 
@@ -141,3 +175,21 @@ spec: description: Kopf status type: object x-kubernetes-preserve-unknown-fields: true + resourceHandleCount: + type: object + properties: + available: + type: integer + ready: + type: integer + resourceHandles: + type: array + items: + type: object + properties: + healthy: + type: boolean + name: + type: string + ready: + type: boolean diff --git a/helm/crds/resourceproviders.yaml b/helm/crds/resourceproviders.yaml index 52d9801..af90bee 100644 --- a/helm/crds/resourceproviders.yaml +++ b/helm/crds/resourceproviders.yaml @@ -76,6 +76,12 @@ spec: If set to true, then ResourceHandle creation is disabled for any ResourceClaim using this ResourceProvider. type: boolean + healthCheck: + description: >- + Check to determine if provisioned resources are healthy. + Any pooled ResourceHandle that fails its health check will not be assigned to a ResourceClaim. + Syntax is a template that should return a boolean. + type: string lifespan: description: >- Used to generate lifespan configuration for ResourceHandles using the ResourceProvider. @@ -102,6 +108,13 @@ spec: Ex: "3d" for 3 days. This value may be a template string. type: string + unclaimed: + description: >- + Lifespan applied to ResourceHandles in the pool to allow for replacement of unused resources. + Configured as a whole number followed by units "s", "m", "h", or "d" for seconds, minutes, hours, or days. + Ex: "3d" for 3 days. + type: string + pattern: ^[0-9]+[smhd]$ linkedResourceProviders: description: >- List of ResourceProviders this one depends upon. @@ -236,6 +249,11 @@ spec: fields to specific numeric ranges. type: object x-kubernetes-preserve-unknown-fields: true + readinessCheck: + description: >- + Check to determine when provisioned resources are ready. + Syntax is a template that should return a boolean. + type: string resourceClaimAnnotations: description: >- Annotations to apply to ResourceClaim. 
diff --git a/helm/templates/crds/resourceclaims.yaml b/helm/templates/crds/resourceclaims.yaml index 2529852..fdbec8d 100644 --- a/helm/templates/crds/resourceclaims.yaml +++ b/helm/templates/crds/resourceclaims.yaml @@ -90,7 +90,7 @@ spec: provider: description: >- ResourceProvider specification used to manage this claim. - Mutually exclusive with provider. + Mutually exclusive with resources. type: object required: - name @@ -101,7 +101,7 @@ spec: type: string parameterValues: description: >- - Parameter values used with this ResourcProvider. + Parameter values used with the ResourceProvider. type: object x-kubernetes-preserve-unknown-fields: true resources: @@ -164,6 +164,9 @@ spec: diffBase: description: Kopf diffbase type: string + healthy: + description: Health state as determined by check from ResourceProviders of resources. + type: boolean kopf: description: Kopf status type: object @@ -235,11 +238,14 @@ spec: namespace: type: string resources: - description: Status of resources managed for this claim + description: Status of resources managed for this ResourceClaim type: array items: type: object properties: + healthy: + description: Health state as determined by check from ResourceProvider. + type: boolean name: description: >- A name used to identify the resource. @@ -256,6 +262,26 @@ spec: type: string namespace: type: string + ready: + description: Readiness state as determined by check from ResourceProvider. + type: boolean + reference: + description: >- + Reference to managed resource. + type: object + required: + - apiVersion + - kind + - name + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + namespace: + type: string state: description: Resource state synchronized from managed resource type: object @@ -271,6 +297,9 @@ spec: - Linked ResourceProvider - Resource Definition type: string + ready: + description: Readiness state as determined by check from ResourceProviders of resources. 
+ type: boolean summary: description: >- Status summary from current resources state, generated from ResourceProvider configuration. diff --git a/helm/templates/crds/resourcehandles.yaml b/helm/templates/crds/resourcehandles.yaml index 7c0d5c5..ee1ee1e 100644 --- a/helm/templates/crds/resourcehandles.yaml +++ b/helm/templates/crds/resourcehandles.yaml @@ -17,9 +17,18 @@ spec: subresources: status: {} additionalPrinterColumns: + - name: Provider + type: string + jsonPath: .spec.provider.name - name: Pool type: string jsonPath: .spec.resourcePool.name + - name: Healthy + type: boolean + jsonPath: .status.healthy + - name: Ready + type: boolean + jsonPath: .status.ready - name: Claim Namespace type: string jsonPath: .spec.resourceClaim.namespace @@ -57,8 +66,6 @@ spec: spec: description: ResourceHandle specification type: object - required: - - resources properties: lifespan: description: >- @@ -91,6 +98,24 @@ spec: Ex: "3d" for 3 days. This value may be a template string. type: string + provider: + description: >- + ResourceProvider specification used to generate resources rather than + explicitly list spec. + Mutually exclusive with resources. + type: object + required: + - name + properties: + name: + description: >- + ResourceProvider name. + type: string + parameterValues: + description: >- + Parameter values used with the ResourceProvider to generate resources list. + type: object + x-kubernetes-preserve-unknown-fields: true resourceClaim: description: >- ResourceClaim reference for claim matched to this ResourceHandle when the handle has been claimed. @@ -160,7 +185,8 @@ spec: type: string reference: description: >- - Reference to managed resource + Reference to managed resource. + (Deprecated in spec in favor of status). type: object required: - apiVersion @@ -181,14 +207,6 @@ spec: claim's template is used to manage the handle template. 
type: object x-kubernetes-preserve-unknown-fields: true - waitingFor: - description: >- - Indication indicating that resource creation is blocked waiting on a condition. - enum: - - ResourceClaim - - Linked ResourceProvider - - Resource Definition - type: string vars: description: >- Variables to use when evaluating validation checks and templates. @@ -203,8 +221,60 @@ spec: diffBase: description: Kopf diffbase type: string + healthy: + description: Health state as determined by check from ResourceProviders of resources. + type: boolean kopf: description: Kopf status type: object x-kubernetes-preserve-unknown-fields: true + ready: + description: Readiness state as determined by check from ResourceProviders of resources. + type: boolean + resources: + description: Status of resources managed by this ResourceHandle + type: array + items: + type: object + properties: + healthy: + description: Health state as determined by check from ResourceProvider. + type: boolean + name: + description: >- + A name used to identify the resource. + type: string + ready: + description: Readiness state as determined by check from ResourceProvider. + type: boolean + reference: + description: >- + Reference to managed resource. + type: object + required: + - apiVersion + - kind + - name + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + waitingFor: + description: >- + Indication indicating that resource creation is blocked waiting on a condition. + enum: + - ResourceClaim + - Linked ResourceProvider + - Resource Definition + type: string + summary: + description: >- + Status summary from current resources state, generated from ResourceProvider configuration. 
+ type: object + x-kubernetes-preserve-unknown-fields: true {{- end -}} diff --git a/helm/templates/crds/resourcepools.yaml b/helm/templates/crds/resourcepools.yaml index f666d1e..e36ab00 100644 --- a/helm/templates/crds/resourcepools.yaml +++ b/helm/templates/crds/resourcepools.yaml @@ -17,9 +17,18 @@ spec: subresources: status: {} additionalPrinterColumns: + - name: ResourceProvider + jsonPath: .spec.provider.name + type: string - name: Min type: integer jsonPath: .spec.minAvailable + - name: Available + type: integer + jsonPath: .status.resourceHandleCount.available + - name: Ready + type: integer + jsonPath: .status.resourceHandleCount.ready - name: Age type: date jsonPath: .metadata.creationTimestamp @@ -48,9 +57,11 @@ spec: spec: description: ResourcePool specification type: object - required: - - resources properties: + deleteUnhealthyResourceHandles: + description: >- + If set then any unbound ResourceHandle that fails health check will be automatically deleted. + type: boolean lifespan: description: >- Lifespan configuration for ResourceHandle provisioned by the ResourcePool. @@ -84,12 +95,35 @@ spec: Ex: "3d" for 3 days. type: string pattern: ^[0-9]+[smhd]$ + maxUnready: + description: >- + Maximum number of resource handles that do not pass readiness check. + type: integer + minimum: 0 minAvailable: description: >- Minimum number of unclaimed resource handles to maintain for the ResourcePool. type: integer minimum: 0 + provider: + description: >- + ResourceProvider specification used to generate resources rather than + explicitly list spec. + Mutually exclusive with resources. + type: object + required: + - name + properties: + name: + description: >- + ResourceProvider name. + type: string + parameterValues: + description: >- + Parameter values used with the ResourceProvider to generate resources list. + type: object + x-kubernetes-preserve-unknown-fields: true resources: description: >- Resources description to apply to ResourceHandles for the pool. 
@@ -142,4 +176,22 @@ spec: description: Kopf status type: object x-kubernetes-preserve-unknown-fields: true + resourceHandleCount: + type: object + properties: + available: + type: integer + ready: + type: integer + resourceHandles: + type: array + items: + type: object + properties: + healthy: + type: boolean + name: + type: string + ready: + type: boolean {{- end -}} diff --git a/helm/templates/crds/resourceproviders.yaml b/helm/templates/crds/resourceproviders.yaml index ecd17dd..cf45bc8 100644 --- a/helm/templates/crds/resourceproviders.yaml +++ b/helm/templates/crds/resourceproviders.yaml @@ -77,6 +77,12 @@ spec: If set to true, then ResourceHandle creation is disabled for any ResourceClaim using this ResourceProvider. type: boolean + healthCheck: + description: >- + Check to determine if provisioned resources are healthy. + Any pooled ResourceHandle that fails its health check will not be assigned to a ResourceClaim. + Syntax is a template that should return a boolean. + type: string lifespan: description: >- Used to generate lifespan configuration for ResourceHandles using the ResourceProvider. @@ -103,6 +109,13 @@ spec: Ex: "3d" for 3 days. This value may be a template string. type: string + unclaimed: + description: >- + Lifespan applied to ResourceHandles in the pool to allow for replacement of unused resources. + Configured as a whole number followed by units "s", "m", "h", or "d" for seconds, minutes, hours, or days. + Ex: "3d" for 3 days. + type: string + pattern: ^[0-9]+[smhd]$ linkedResourceProviders: description: >- List of ResourceProviders this one depends upon. @@ -237,6 +250,11 @@ spec: fields to specific numeric ranges. type: object x-kubernetes-preserve-unknown-fields: true + readinessCheck: + description: >- + Check to determine when provisioned resources are ready. + Syntax is a template that should return a boolean. + type: string resourceClaimAnnotations: description: >- Annotations to apply to ResourceClaim. 
diff --git a/operator/kopfobject.py b/operator/kopfobject.py new file mode 100644 index 0000000..d27ec1c --- /dev/null +++ b/operator/kopfobject.py @@ -0,0 +1,151 @@ +import asyncio + +from datetime import datetime, timezone +from typing import List, Mapping, Optional, TypeVar, Union + +import kopf +import kubernetes_asyncio + +from poolboy import Poolboy + +class KopfObject: + def __init__(self, + annotations: Union[kopf.Annotations, Mapping], + labels: Union[kopf.Labels, Mapping], + meta: Union[kopf.Meta, Mapping], + name: str, + namespace: str, + spec: Union[kopf.Spec, Mapping], + status: Union[kopf.Status, Mapping], + uid: str, + ): + self.annotations = annotations + self.labels = labels + self.lock = asyncio.Lock() + self.meta = meta + self.name = name + self.namespace = namespace + self.spec = spec + self.status = status + self.uid = uid + + def __str__(self) -> str: + return f"{self.kind} {self.name} in {self.namespace}" + + @property + def api_group_version(self): + return f"{self.api_group}/{self.api_version}" + + @property + def creation_datetime(self): + return datetime.strptime(self.creation_timestamp, "%Y-%m-%dT%H:%M:%S%z") + + @property + def creation_timestamp(self) -> str: + return self.meta['creationTimestamp'] + + @property + def deletion_timestamp(self) -> Optional[str]: + return self.meta.get('deletionTimestamp') + + @property + def metadata(self) -> Mapping: + return self.meta + + @property + def reference(self) -> Mapping: + return { + "apiVersion": self.api_group_version, + "kind": self.kind, + "name": self.name, + "namespace": self.namespace, + } + + def refresh(self, + annotations: kopf.Annotations, + labels: kopf.Labels, + meta: kopf.Meta, + spec: kopf.Spec, + status: kopf.Status, + uid: str, + ) -> None: + self.annotations = annotations + self.labels = labels + self.meta = meta + self.spec = spec + self.status = status + self.uid = uid + + def refresh_from_definition(self, definition: Mapping) -> None: + self.annotations = 
definition['metadata'].get('annotations', {}) + self.labels = definition['metadata'].get('labels', {}) + self.meta = definition['metadata'] + self.spec = definition['spec'] + self.status = definition.get('status', {}) + self.uid = definition['metadata']['uid'] + + async def delete(self): + try: + await Poolboy.custom_objects_api.delete_namespaced_custom_object( + group = self.api_group, + name = self.name, + namespace = self.namespace, + plural = self.plural, + version = self.api_version, + ) + except kubernetes_asyncio.client.exceptions.ApiException as e: + if e.status != 404: + raise + + async def json_patch(self, patch: List[Mapping]) -> None: + """Apply json patch to object and update definition.""" + definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object( + group = self.api_group, + name = self.name, + namespace = self.namespace, + plural = self.plural, + version = self.api_version, + body = patch, + _content_type = 'application/json-patch+json', + ) + self.refresh_from_definition(definition) + + async def json_patch_status(self, patch: List[Mapping]) -> None: + definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object_status( + group = self.api_group, + name = self.name, + namespace = self.namespace, + plural = self.plural, + version = self.api_version, + body = patch, + _content_type = 'application/json-patch+json', + ) + self.refresh_from_definition(definition) + + async def merge_patch(self, patch: Mapping) -> None: + """Apply merge patch to object and update definition.""" + definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object( + group = self.api_group, + name = self.name, + namespace = self.namespace, + plural = self.plural, + version = self.api_version, + body = patch, + _content_type = 'application/merge-patch+json' + ) + self.refresh_from_definition(definition) + + async def merge_patch_status(self, patch: Mapping) -> None: + """Apply merge patch to object status and update 
definition.""" + definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object_status( + group = self.api_group, + name = self.name, + namespace = self.namespace, + plural = self.plural, + version = self.api_version, + body = { + "status": patch + }, + _content_type = 'application/merge-patch+json' + ) + self.refresh_from_definition(definition) diff --git a/operator/poolboy_k8s.py b/operator/poolboy_k8s.py index c82d119..642d416 100644 --- a/operator/poolboy_k8s.py +++ b/operator/poolboy_k8s.py @@ -157,7 +157,7 @@ async def get_object( api_version: str, kind: str, name: str, - namespace: str = None, + namespace: Optional[str] = None, ) -> Optional[Mapping]: if '/' in api_version: group, version = api_version.split('/') diff --git a/operator/poolboy_templating.py b/operator/poolboy_templating.py index 3d7cd76..6322bbe 100644 --- a/operator/poolboy_templating.py +++ b/operator/poolboy_templating.py @@ -126,6 +126,13 @@ def seconds_to_interval(seconds:int) -> str: # name: alice type_filter_match_re = re.compile(r'^{{(?!.*{{).*\| *(bool|float|int|object) *}}$') +def check_condition(condition, template_style='jinja2', variables={}): + return jinja2process( + template="{{ " + condition + " | bool}}", + template_style=template_style, + variables=variables + ) + def j2now(utc=False, fmt=None): dt = datetime.now(timezone.utc if utc else None) return dt.strftime(fmt) if fmt else dt diff --git a/operator/resourceclaim.py b/operator/resourceclaim.py index caa373a..953adab 100644 --- a/operator/resourceclaim.py +++ b/operator/resourceclaim.py @@ -9,6 +9,7 @@ from deep_merge import deep_merge from jsonpatch_from_diff import jsonpatch_from_diff +from kopfobject import KopfObject from poolboy import Poolboy from poolboy_templating import recursive_process_template_strings @@ -20,19 +21,24 @@ ResourceHandleT = TypeVar('ResourceHandleT', bound='ResourceHandle') ResourceProviderT = TypeVar('ResourceProviderT', bound='ResourceProvider') -class ResourceClaim: +class 
ResourceClaim(KopfObject): + api_group = Poolboy.operator_domain + api_version = Poolboy.operator_version + kind = "ResourceClaim" + plural = "resourceclaims" + instances = {} - lock = asyncio.Lock() + class_lock = asyncio.Lock() - @staticmethod - def __register_definition(definition: Mapping) -> ResourceClaimT: + @classmethod + def __register_definition(cls, definition: Mapping) -> ResourceClaimT: name = definition['metadata']['name'] namespace = definition['metadata']['namespace'] - resource_claim = ResourceClaim.instances.get((namespace, name)) + resource_claim = cls.instances.get((namespace, name)) if resource_claim: resource_claim.refresh_from_definition(definition=definition) else: - resource_claim = ResourceClaim( + resource_claim = cls( annotations = definition['metadata'].get('annotations', {}), labels = definition['metadata'].get('labels', {}), meta = definition['metadata'], @@ -42,22 +48,23 @@ def __register_definition(definition: Mapping) -> ResourceClaimT: status = definition.get('status', {}), uid = definition['metadata']['uid'], ) - ResourceClaim.instances[(namespace, name)] = resource_claim + cls.instances[(namespace, name)] = resource_claim return resource_claim - @staticmethod - async def get(name: str, namespace: str) -> ResourceClaimT: - async with ResourceClaim.lock: - resource_claim = ResourceClaim.instances.get((namespace, name)) + @classmethod + async def get(cls, name: str, namespace: str) -> ResourceClaimT: + async with cls.class_lock: + resource_claim = cls.instances.get((namespace, name)) if resource_claim: return resource_claim definition = await Poolboy.custom_objects_api.get_namespaced_custom_object( Poolboy.operator_domain, Poolboy.operator_version, namespace, 'resourceclaims', name ) - return ResourceClaim.__register_definition(definition=definition) + return cls.__register_definition(definition=definition) - @staticmethod + @classmethod async def register( + cls, annotations: kopf.Annotations, labels: kopf.Labels, meta: kopf.Meta, 
@@ -67,8 +74,8 @@ async def register( status: kopf.Status, uid: str, ) -> ResourceClaimT: - async with ResourceClaim.lock: - resource_claim = ResourceClaim.instances.get((namespace, name)) + async with cls.class_lock: + resource_claim = cls.instances.get((namespace, name)) if resource_claim: resource_claim.refresh( annotations = annotations, @@ -79,7 +86,7 @@ async def register( uid = uid, ) else: - resource_claim = ResourceClaim( + resource_claim = cls( annotations = annotations, labels = labels, meta = meta, @@ -89,43 +96,21 @@ async def register( status = status, uid = uid, ) - ResourceClaim.instances[(namespace, name)] = resource_claim + cls.instances[(namespace, name)] = resource_claim return resource_claim - @staticmethod + @classmethod async def register_definition( + cls, definition: Mapping, ) -> ResourceClaimT: - async with ResourceClaim.lock: - return ResourceClaim.__register_definition(definition=definition) - - @staticmethod - async def unregister(name: str, namespace: str) -> Optional[ResourceClaimT]: - async with ResourceClaim.lock: - return ResourceClaim.instances.pop((namespace, name), None) - - def __init__(self, - annotations: Union[kopf.Annotations, Mapping], - labels: Union[kopf.Labels, Mapping], - meta: Union[kopf.Meta, Mapping], - name: str, - namespace: str, - spec: Union[kopf.Spec, Mapping], - status: Union[kopf.Status, Mapping], - uid: str, - ): - self.annotations = annotations - self.labels = labels - self.lock = asyncio.Lock() - self.meta = meta - self.name = name - self.namespace = namespace - self.spec = spec - self.status = status - self.uid = uid - - def __str__(self) -> str: - return f"ResourceClaim {self.name} in {self.namespace}" + async with cls.class_lock: + return cls.__register_definition(definition=definition) + + @classmethod + async def unregister(cls, name: str, namespace: str) -> Optional[ResourceClaimT]: + async with cls.class_lock: + return cls.instances.pop((namespace, name), None) @property def approval_state(self) -> 
Optional[str]: @@ -146,14 +131,6 @@ def auto_detach_when(self) -> Optional[str]: def claim_is_initialized(self) -> bool: return f"{Poolboy.operator_domain}/resource-claim-init-timestamp" in self.annotations - @property - def creation_datetime(self): - return datetime.strptime(self.creation_timestamp, "%Y-%m-%dT%H:%H:%S%z") - - @property - def creation_timestamp(self) -> str: - return self.meta['creationTimestamp'] - @property def has_resource_handle(self) -> bool: """Return whether this ResourceClaim is bound to a ResourceHandle.""" @@ -241,18 +218,10 @@ def lifespan_start_timestamp(self) -> Optional[str]: return timestamp return self.creation_timestamp - @property - def metadata(self) -> Mapping: - return self.meta - @property def parameter_values(self) -> Mapping: return self.status.get('provider', {}).get('parameterValues', {}) - @property - def provider_name(self) -> Optional[str]: - return - @property def requested_lifespan_end_datetime(self): timestamp = self.requested_lifespan_end_timestamp @@ -436,29 +405,6 @@ def get_resource_state_from_status(self, resource_number): return None return self.status['resources'][resource_number].get('state') - def refresh(self, - annotations: kopf.Annotations, - labels: kopf.Labels, - meta: kopf.Meta, - spec: kopf.Spec, - status: kopf.Status, - uid: str, - ) -> None: - self.annotations = annotations - self.labels = labels - self.meta = meta - self.spec = spec - self.status = status - self.uid = uid - - def refresh_from_definition(self, definition: Mapping) -> None: - self.annotations = definition['metadata'].get('annotations', {}) - self.labels = definition['metadata'].get('labels', {}) - self.meta = definition['metadata'] - self.spec = definition['spec'] - self.status = definition.get('status', {}) - self.uid = definition['metadata']['uid'] - async def update_status_from_handle(self, logger: kopf.ObjectLogger, resource_handle: ResourceHandleT @@ -520,17 +466,20 @@ async def update_status_from_handle(self, "path": 
"/status/lifespan/relativeMaximum", }) - for index, resource in enumerate(resource_handle.spec['resources']): - if 'waitingFor' in resource: + for resource_index, status_resource in enumerate(resource_handle.status_resources): + if ( + 'waitingFor' in status_resource and + status_resource['waitingFor'] != self.status_resources[resource_index].get('waitingFor') + ): patch.append({ "op": "add", - "path": f"/status/resources/{index}/waitingFor", - "value": resource['waitingFor'], + "path": f"/status/resources/{resource_index}/waitingFor", + "value": status_resource['waitingFor'], }) - elif 'waitingFor' in self.status_resources[index]: + elif 'waitingFor' in self.status_resources[resource_index]: patch.append({ "op": "remove", - "path": f"/status/resources/{index}/waitingFor", + "path": f"/status/resources/{resource_index}/waitingFor", }) if patch: @@ -590,15 +539,6 @@ async def assign_resource_providers(self, logger) -> None: f"to ResourceClaim {self.name} in {self.namespace}" ) - async def delete(self): - await Poolboy.custom_objects_api.delete_namespaced_custom_object( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourceclaims', - version = Poolboy.operator_version, - ) - async def detach(self, resource_handle): await self.merge_patch_status({ "resourceHandle": { @@ -628,7 +568,7 @@ async def get_resource_providers(self, resources:Optional[List[Mapping]]=None) - async def get_resources_from_provider(self, resource_handle: Optional[ResourceHandleT]=None) -> List[Mapping]: """Return resources for this claim as defined by ResourceProvider""" resource_provider = await self.get_resource_provider() - return await resource_provider.get_claim_resources( + return await resource_provider.get_resources( resource_claim = self, resource_handle = resource_handle, ) @@ -664,19 +604,6 @@ async def initialize_claim(self, logger): await self.merge_patch(patch) logger.info(f"ResourceClaim {self.name} in {self.namespace} 
initialized") - async def json_patch_status(self, patch: List[Mapping]) -> None: - """Apply json patch to object status and update definition.""" - definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object_status( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourceclaims', - version = Poolboy.operator_version, - body = patch, - _content_type = 'application/json-patch+json', - ) - self.refresh_from_definition(definition) - async def manage(self, logger) -> None: async with self.lock: if self.lifespan_start_datetime \ @@ -814,6 +741,22 @@ async def __manage_resource_handle(self, ) -> None: patch = [] + # Ensure ResourceHandle provider matches ResourceClaim + if self.has_resource_provider: + if resource_handle.spec.get('provider') != self.spec['provider']: + logger.info(f"Setting provider on {resource_handle}") + patch.append({ + "op": "add", + "path": "/spec/provider", + "value": self.spec['provider'] + }) + elif resource_handle.has_resource_provider: + logger.info(f"Removing provider from {resource_handle}") + patch.append({ + "op": "remove", + "path": "/spec/provider", + }) + # Add any new resources from claim to handle for resource_index in range(len(resource_handle.resources), len(resource_claim_resources)): resource = resource_claim_resources[resource_index] @@ -879,44 +822,7 @@ async def __manage_resource_handle(self, }) if patch: - definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object( - body = patch, - group = Poolboy.operator_domain, - name = resource_handle.name, - namespace = resource_handle.namespace, - plural = 'resourcehandles', - version = Poolboy.operator_version, - _content_type = 'application/json-patch+json', - ) - resource_handle.refresh_from_definition(definition) - - async def merge_patch(self, patch: Mapping) -> None: - """Apply merge patch to object status and update definition.""" - definition = await 
Poolboy.custom_objects_api.patch_namespaced_custom_object( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourceclaims', - version = Poolboy.operator_version, - body = patch, - _content_type = 'application/merge-patch+json' - ) - self.refresh_from_definition(definition) - - async def merge_patch_status(self, patch: Mapping) -> None: - """Apply merge patch to object status and update definition.""" - definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object_status( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourceclaims', - version = Poolboy.operator_version, - body = { - "status": patch - }, - _content_type = 'application/merge-patch+json' - ) - self.refresh_from_definition(definition) + await resource_handle.json_patch(patch) async def refetch(self) -> Optional[ResourceClaimT]: try: @@ -927,7 +833,7 @@ async def refetch(self) -> Optional[ResourceClaimT]: return self except kubernetes_asyncio.client.exceptions.ApiException as e: if e.status == 404: - ResourceClaim.unregister(name=self.name, namespace=self.namespace) + self.unregister(name=self.name, namespace=self.namespace) return None else: raise @@ -1086,7 +992,7 @@ async def validate_with_provider(self, } if 'resources' not in self.status: - resources = await resource_provider.get_claim_resources( + resources = await resource_provider.get_resources( parameter_values = parameter_values, resource_claim = self, ) @@ -1098,44 +1004,62 @@ async def validate_with_provider(self, ] await self.merge_patch_status(patch) - async def remove_resource_from_status(self, index): - patch = [{ - "op": "remove", - "path": f"/status/resources/{index}/state", - }] - - if self.has_resource_provider: - resource_provider = await self.get_resource_provider() - if resource_provider.status_summary_template: - del self.status_resources[index]['state'] - patch.append({ - "op": "add", - "path": "/status/summary", - 
"value": resource_provider.make_status_summary(self), - }) - - await self.json_patch_status(patch) + async def remove_resource_from_status(self, + index: int, + logger: kopf.ObjectLogger, + ): + patch = [] + if 'state' in self_status_resources[index]: + del self.status_resources[index]['state'] + patch.append({ + "op": "remove", + "path": f"/status/resources/{index}/state", + }) + await self.__update_status( + logger=logger, + patch=patch, + ) - async def update_resource_in_status(self, index, state): + async def update_resource_in_status(self, + index: int, + logger: kopf.ObjectLogger, + state: Mapping, + ): patch = [] if self.status_resources[index].get('state') != state: + self.status_resources[index]['state'] = state patch.append({ "op": "add", "path": f"/status/resources/{index}/state", "value": state, }) + await self.__update_status( + logger=logger, + patch=patch, + ) + + async def __update_status(self, + logger: kopf.ObjectLogger, + patch: List[Mapping]=[], + ): + # FIXME - add healthy + # FIXME - add ready if self.has_resource_provider: resource_provider = await self.get_resource_provider() if resource_provider.status_summary_template: - self.status_resources[index]['state'] = state - status_summary = resource_provider.make_status_summary(self) - if status_summary != self.status.get('summary'): - patch.append({ - "op": "add", - "path": "/status/summary", - "value": status_summary, - }) - + try: + status_summary = resource_provider.make_status_summary( + resource_claim=self, + resources=self.status_resources, + ) + if status_summary != self.status.get('summary'): + patch.append({ + "op": "add", + "path": "/status/summary", + "value": status_summary, + }) + except Exception: + logger.exception(f"Failed to generate status summary for {self}") if patch: await self.json_patch_status(patch) diff --git a/operator/resourcehandle.py b/operator/resourcehandle.py index 86e0d08..c1f78a5 100644 --- a/operator/resourcehandle.py +++ b/operator/resourcehandle.py @@ -16,27 
+16,34 @@ import resourceprovider import resourcewatcher +from kopfobject import KopfObject from poolboy import Poolboy from poolboy_templating import recursive_process_template_strings, seconds_to_interval ResourceClaimT = TypeVar('ResourceClaimT', bound='ResourceClaim') ResourceHandleT = TypeVar('ResourceHandleT', bound='ResourceHandle') ResourcePoolT = TypeVar('ResourcePoolT', bound='ResourcePool') +ResourceProviderT = TypeVar('ResourceProviderT', bound='ResourceProvider') + +class ResourceHandle(KopfObject): + api_group = Poolboy.operator_domain + api_version = Poolboy.operator_version + kind = "ResourceHandle" + plural = "resourcehandles" -class ResourceHandle: all_instances = {} bound_instances = {} unbound_instances = {} - lock = asyncio.Lock() + class_lock = asyncio.Lock() - @staticmethod - def __register_definition(definition: Mapping) -> ResourceHandleT: + @classmethod + def __register_definition(cls, definition: Mapping) -> ResourceHandleT: name = definition['metadata']['name'] - resource_handle = ResourceHandle.all_instances.get(name) + resource_handle = cls.all_instances.get(name) if resource_handle: resource_handle.refresh_from_definition(definition=definition) else: - resource_handle = ResourceHandle( + resource_handle = cls( annotations = definition['metadata'].get('annotations', {}), labels = definition['metadata'].get('labels', {}), meta = definition['metadata'], @@ -49,15 +56,16 @@ def __register_definition(definition: Mapping) -> ResourceHandleT: resource_handle.__register() return resource_handle - @staticmethod + @classmethod async def bind_handle_to_claim( + cls, logger: kopf.ObjectLogger, resource_claim: ResourceClaimT, resource_claim_resources: List[Mapping], ) -> Optional[ResourceHandleT]: - async with ResourceHandle.lock: + async with cls.class_lock: # Check if there is already an assigned claim - resource_handle = ResourceHandle.bound_instances.get((resource_claim.namespace, resource_claim.name)) + resource_handle = 
cls.bound_instances.get((resource_claim.namespace, resource_claim.name)) if resource_handle: return resource_handle @@ -66,7 +74,11 @@ async def bind_handle_to_claim( claim_status_resources = resource_claim.status_resources # Loop through unbound instances to find best match - for resource_handle in ResourceHandle.unbound_instances.values(): + for resource_handle in cls.unbound_instances.values(): + # Skip unhealthy + if resource_handle.is_healthy == False: + continue + # Honor explicit pool requests if resource_claim.resource_pool_name \ and resource_claim.resource_pool_name != resource_handle.resource_pool_name: @@ -78,6 +90,16 @@ async def bind_handle_to_claim( continue diff_count = 0 + + # Prefer handles with known healthy status + if resource_handle.is_healthy == None: + diff_count += 0.1 + # Prefer handles that are ready + if resource_handle.is_ready == False: + diff_count += 0.01 + elif resource_handle.is_ready == None: + diff_count += 0.001 + is_match = True handle_resources = resource_handle.resources if len(resource_claim_resources) < len(handle_resources): @@ -123,7 +145,7 @@ async def bind_handle_to_claim( matched_resource_handles.append((diff_count, resource_handle)) # Bind the oldest ResourceHandle with the smallest difference score - matched_resource_handles.sort(key=lambda item: f"{item[0]:09d} {item[1].creation_timestamp}") + matched_resource_handles.sort(key=lambda item: f"{item[0]:012.3f} {item[1].creation_timestamp}") for matched_resource_handle_item in matched_resource_handles: matched_resource_handle = matched_resource_handle_item[1] patch = [ @@ -161,16 +183,8 @@ async def bind_handle_to_claim( }) try: - definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object( - group = Poolboy.operator_domain, - name = matched_resource_handle.name, - namespace = matched_resource_handle.namespace, - plural = 'resourcehandles', - version = Poolboy.operator_version, - _content_type = 'application/json-patch+json', - body = patch, - ) - 
matched_resource_handle = ResourceHandle.__register_definition(definition=definition) + await matched_resource_handle.json_patch(patch) + matched_resource_handle.__register() except kubernetes_asyncio.client.exceptions.ApiException as exception: if exception.status == 404: logger.warning(f"Attempt to bind deleted {matched_resource_handle} to {resource_claim}") @@ -196,51 +210,12 @@ async def bind_handle_to_claim( ) return matched_resource_handle - @staticmethod - async def create_for_claim( + @classmethod + async def create_for_claim(cls, logger: kopf.ObjectLogger, resource_claim: ResourceClaimT, resource_claim_resources: List[Mapping], ): - resource_providers = await resource_claim.get_resource_providers(resource_claim_resources) - vars_ = {} - resources = [] - lifespan_default_timedelta = None - lifespan_maximum = None - lifespan_maximum_timedelta = None - lifespan_relative_maximum = None - lifespan_relative_maximum_timedelta = None - for i, claim_resource in enumerate(resource_claim_resources): - provider = resource_providers[i] - vars_.update(provider.vars) - - provider_lifespan_default_timedelta = provider.get_lifespan_default_timedelta(resource_claim) - if provider_lifespan_default_timedelta: - if not lifespan_default_timedelta \ - or provider_lifespan_default_timedelta < lifespan_default_timedelta: - lifespan_default_timedelta = provider_lifespan_default_timedelta - - provider_lifespan_maximum_timedelta = provider.get_lifespan_maximum_timedelta(resource_claim) - if provider_lifespan_maximum_timedelta: - if not lifespan_maximum_timedelta \ - or provider_lifespan_maximum_timedelta < lifespan_maximum_timedelta: - lifespan_maximum = provider.lifespan_maximum - lifespan_maximum_timedelta = provider_lifespan_maximum_timedelta - - provider_lifespan_relative_maximum_timedelta = provider.get_lifespan_relative_maximum_timedelta(resource_claim) - if provider_lifespan_relative_maximum_timedelta: - if not lifespan_relative_maximum_timedelta \ - or 
provider_lifespan_relative_maximum_timedelta < lifespan_relative_maximum_timedelta: - lifespan_relative_maximum = provider.lifespan_relative_maximum - lifespan_relative_maximum_timedelta = provider_lifespan_relative_maximum_timedelta - - resources_item = {"provider": provider.as_reference()} - if 'name' in claim_resource: - resources_item['name'] = claim_resource['name'] - if 'template' in claim_resource: - resources_item['template'] = claim_resource['template'] - resources.append(resources_item) - definition = { 'apiVersion': Poolboy.operator_api_version, 'kind': 'ResourceHandle', @@ -259,11 +234,61 @@ async def create_for_claim( 'name': resource_claim.name, 'namespace': resource_claim.namespace }, - 'resources': resources, - 'vars': vars_, } } + resources = [] + lifespan_default_timedelta = None + lifespan_maximum = None + lifespan_maximum_timedelta = None + lifespan_relative_maximum = None + lifespan_relative_maximum_timedelta = None + if resource_claim.has_resource_provider: + resource_provider = await resource_claim.get_resource_provider() + definition['spec']['resources'] = resource_claim_resources + definition['spec']['provider'] = resource_claim.spec['provider'] + lifespan_default_timedelta = resource_provider.get_lifespan_default_timedelta(resource_claim) + lifespan_maximum = resource_provider.lifespan_maximum + lifespan_maximum_timedelta = resource_provider.get_lifespan_maximum_timedelta(resource_claim) + lifespan_relative_maximum = resource_provider.lifespan_relative_maximum + lifespan_relative_maximum_timedelta = resource_provider.get_lifespan_maximum_timedelta(resource_claim) + else: + vars_ = {} + + resource_providers = await resource_claim.get_resource_providers(resource_claim_resources) + for i, claim_resource in enumerate(resource_claim_resources): + provider = resource_providers[i] + vars_.update(provider.vars) + + provider_lifespan_default_timedelta = provider.get_lifespan_default_timedelta(resource_claim) + if 
provider_lifespan_default_timedelta: + if not lifespan_default_timedelta \ + or provider_lifespan_default_timedelta < lifespan_default_timedelta: + lifespan_default_timedelta = provider_lifespan_default_timedelta + + provider_lifespan_maximum_timedelta = provider.get_lifespan_maximum_timedelta(resource_claim) + if provider_lifespan_maximum_timedelta: + if not lifespan_maximum_timedelta \ + or provider_lifespan_maximum_timedelta < lifespan_maximum_timedelta: + lifespan_maximum = provider.lifespan_maximum + lifespan_maximum_timedelta = provider_lifespan_maximum_timedelta + + provider_lifespan_relative_maximum_timedelta = provider.get_lifespan_relative_maximum_timedelta(resource_claim) + if provider_lifespan_relative_maximum_timedelta: + if not lifespan_relative_maximum_timedelta \ + or provider_lifespan_relative_maximum_timedelta < lifespan_relative_maximum_timedelta: + lifespan_relative_maximum = provider.lifespan_relative_maximum + lifespan_relative_maximum_timedelta = provider_lifespan_relative_maximum_timedelta + + resources_item = {"provider": provider.as_reference()} + if 'name' in claim_resource: + resources_item['name'] = claim_resource['name'] + if 'template' in claim_resource: + resources_item['template'] = claim_resource['template'] + resources.append(resources_item) + + definition['spec']['resources'] = resources + definition['spec']['vars'] = vars_ lifespan_end_datetime = None lifespan_start_datetime = datetime.now(timezone.utc) @@ -316,15 +341,16 @@ async def create_for_claim( plural = 'resourcehandles', version = Poolboy.operator_version, ) - resource_handle = await ResourceHandle.register_definition(definition=definition) + resource_handle = await cls.register_definition(definition=definition) logger.info( f"Created ResourceHandle {resource_handle.name} for " f"ResourceClaim {resource_claim.name} in {resource_claim.namespace}" ) return resource_handle - @staticmethod + @classmethod async def create_for_pool( + cls, logger: kopf.ObjectLogger, 
resource_pool: ResourcePoolT, ): @@ -339,11 +365,29 @@ async def create_for_pool( }, }, "spec": { - "resourcePool": resource_pool.ref, - "resources": resource_pool.resources, + "resourcePool": resource_pool.reference, "vars": resource_pool.vars, } } + + if resource_pool.has_resource_provider: + definition['spec']['provider'] = resource_pool.spec['provider'] + resource_provider = await resource_pool.get_resource_provider() + if resource_provider.has_lifespan: + definition['spec']['lifespan'] = {} + if resource_provider.lifespan_default: + definition['spec']['lifespan']['default'] = resource_provider.lifespan_default + if resource_provider.lifespan_maximum: + definition['spec']['lifespan']['maximum'] = resource_provider.lifespan_maximum + if resource_provider.lifespan_relative_maximum: + definition['spec']['lifespan']['maximum'] = resource_provider.lifespan_relative_maximum + if resource_provider.lifespan_unclaimed: + definition['spec']['lifespan']['end'] = ( + datetime.now(timezone.utc) + resource_provider.lifespan_unclaimed_timedelta + ).strftime("%FT%TZ") + else: + definition['spec']['resources'] = resource_pool.resources + if resource_pool.has_lifespan: definition['spec']['lifespan'] = {} if resource_pool.lifespan_default: @@ -364,18 +408,19 @@ async def create_for_pool( plural = "resourcehandles", version = Poolboy.operator_version, ) - resource_handle = await ResourceHandle.register_definition(definition=definition) + resource_handle = await cls.register_definition(definition=definition) logger.info(f"Created ResourceHandle {resource_handle.name} for ResourcePool {resource_pool.name}") return resource_handle - @staticmethod + @classmethod async def delete_unbound_handles_for_pool( + cls, logger: kopf.ObjectLogger, resource_pool: ResourcePoolT, ) -> List[ResourceHandleT]: - async with ResourceHandle.lock: + async with cls.class_lock: resource_handles = [] - for resource_handle in list(ResourceHandle.unbound_instances.values()): + for resource_handle in 
list(cls.unbound_instances.values()): if resource_handle.resource_pool_name == resource_pool.name \ and resource_handle.resource_pool_namespace == resource_pool.namespace: logger.info( @@ -386,10 +431,10 @@ async def delete_unbound_handles_for_pool( await resource_handle.delete() return resource_handles - @staticmethod - async def get(name: str) -> Optional[ResourceHandleT]: - async with ResourceHandle.lock: - resource_handle = ResourceHandle.all_instances.get(name) + @classmethod + async def get(cls, name: str) -> Optional[ResourceHandleT]: + async with cls.class_lock: + resource_handle = cls.all_instances.get(name) if resource_handle: return resource_handle definition = await Poolboy.custom_objects_api.get_namespaced_custom_object( @@ -397,15 +442,19 @@ async def get(name: str) -> Optional[ResourceHandleT]: ) if 'deletionTimestamp' in definition['metadata']: return None - return ResourceHandle.__register_definition(definition=definition) + return cls.__register_definition(definition=definition) - @staticmethod - def get_from_cache(name: str) -> Optional[ResourceHandleT]: - return ResourceHandle.all_instances.get(name) + @classmethod + def get_from_cache(cls, name: str) -> Optional[ResourceHandleT]: + return cls.all_instances.get(name) - @staticmethod - async def get_unbound_handles_for_pool(resource_pool: ResourcePoolT, logger) -> List[ResourceHandleT]: - async with ResourceHandle.lock: + @classmethod + async def get_unbound_handles_for_pool( + cls, + resource_pool: ResourcePoolT, + logger: kopf.ObjectLogger, + ) -> List[ResourceHandleT]: + async with cls.class_lock: resource_handles = [] for resource_handle in ResourceHandle.unbound_instances.values(): if resource_handle.resource_pool_name == resource_pool.name \ @@ -413,9 +462,9 @@ async def get_unbound_handles_for_pool(resource_pool: ResourcePoolT, logger) -> resource_handles.append(resource_handle) return resource_handles - @staticmethod - async def preload(logger: kopf.ObjectLogger) -> None: - async with 
ResourceHandle.lock: + @classmethod + async def preload(cls, logger: kopf.ObjectLogger) -> None: + async with cls.class_lock: _continue = None while True: resource_handle_list = await Poolboy.custom_objects_api.list_namespaced_custom_object( @@ -424,13 +473,14 @@ async def preload(logger: kopf.ObjectLogger) -> None: limit = 50, ) for definition in resource_handle_list['items']: - ResourceHandle.__register_definition(definition=definition) + cls.__register_definition(definition=definition) _continue = resource_handle_list['metadata'].get('continue') if not _continue: break - @staticmethod + @classmethod async def register( + cls, annotations: kopf.Annotations, labels: kopf.Labels, meta: kopf.Meta, @@ -440,8 +490,8 @@ async def register( status: kopf.Status, uid: str, ) -> ResourceHandleT: - async with ResourceHandle.lock: - resource_handle = ResourceHandle.all_instances.get(name) + async with cls.class_lock: + resource_handle = cls.all_instances.get(name) if resource_handle: resource_handle.refresh( annotations = annotations, @@ -452,7 +502,7 @@ async def register( uid = uid, ) else: - resource_handle = ResourceHandle( + resource_handle = cls( annotations = annotations, labels = labels, meta = meta, @@ -465,27 +515,27 @@ async def register( resource_handle.__register() return resource_handle - @staticmethod - async def register_definition(definition: Mapping) -> ResourceHandleT: - async with ResourceHandle.lock: - return ResourceHandle.__register_definition(definition) + @classmethod + async def register_definition(cls, definition: Mapping) -> ResourceHandleT: + async with cls.class_lock: + return cls.__register_definition(definition) - @staticmethod - async def unregister(name: str) -> Optional[ResourceHandleT]: - async with ResourceHandle.lock: - resource_handle = ResourceHandle.all_instances.pop(name, None) + @classmethod + async def unregister(cls, name: str) -> Optional[ResourceHandleT]: + async with cls.class_lock: + resource_handle = 
cls.all_instances.pop(name, None) if resource_handle: resource_handle.__unregister() return resource_handle def __init__(self, - annotations: kopf.Annotations, - labels: kopf.Labels, - meta: kopf.Meta, + annotations: Union[kopf.Annotations, Mapping], + labels: Union[kopf.Labels, Mapping], + meta: Union[kopf.Meta, Mapping], name: str, namespace: str, - spec: kopf.Spec, - status: kopf.Status, + spec: Union[kopf.Spec, Mapping], + status: Union[kopf.Status, Mapping], uid: str, ): self.annotations = annotations @@ -497,8 +547,6 @@ def __init__(self, self.spec = spec self.status = status self.uid = uid - self.resource_states = [] - self.resource_refresh_datetime = [] def __str__(self) -> str: return f"ResourceHandle {self.name}" @@ -508,37 +556,25 @@ def __register(self) -> None: Add ResourceHandle to register of bound or unbound instances. This method must be called with the ResourceHandle.lock held. """ - ResourceHandle.all_instances[self.name] = self + self.all_instances[self.name] = self if self.is_bound: - ResourceHandle.bound_instances[( + self.bound_instances[( self.resource_claim_namespace, self.resource_claim_name )] = self - ResourceHandle.unbound_instances.pop(self.name, None) + self.unbound_instances.pop(self.name, None) elif not self.is_deleting: - ResourceHandle.unbound_instances[self.name] = self + self.unbound_instances[self.name] = self def __unregister(self) -> None: - ResourceHandle.all_instances.pop(self.name, None) - ResourceHandle.unbound_instances.pop(self.name, None) + self.all_instances.pop(self.name, None) + self.unbound_instances.pop(self.name, None) if self.is_bound: - ResourceHandle.bound_instances.pop( + self.bound_instances.pop( (self.resource_claim_namespace, self.resource_claim_name), None, ) - @property - def creation_datetime(self): - return datetime.strptime(self.creation_timestamp, "%Y-%m-%dT%H:%M:%S%z") - - @property - def creation_timestamp(self) -> str: - return self.meta['creationTimestamp'] - - @property - def 
deletion_timestamp(self) -> str: - return self.meta.get('deletionTimestamp') - @property def guid(self) -> str: name = self.name @@ -554,6 +590,11 @@ def guid(self) -> str: def has_lifespan_end(self) -> bool: 'end' in self.spec.get('lifespan', {}) + @property + def has_resource_provider(self) -> bool: + """Return whether this ResourceHandle is managed by a ResourceProvider.""" + return 'provider' in self.spec + @property def ignore(self) -> bool: return Poolboy.ignore_label in self.labels @@ -570,6 +611,10 @@ def is_deleting(self) -> bool: def is_from_resource_pool(self) -> bool: return 'resourcePool' in self.spec + @property + def is_healthy(self) -> Optional[bool]: + return self.status.get('healthy') + @property def is_past_lifespan_end(self) -> bool: dt = self.lifespan_end_datetime @@ -577,6 +622,10 @@ def is_past_lifespan_end(self) -> bool: return False return dt < datetime.now(timezone.utc) + @property + def is_ready(self) -> Optional[bool]: + return self.status.get('ready') + @property def lifespan_end_datetime(self) -> Any: timestamp = self.lifespan_end_timestamp @@ -590,17 +639,8 @@ def lifespan_end_timestamp(self) -> Optional[str]: return lifespan.get('end') @property - def metadata(self) -> Mapping: - return self.meta - - @property - def reference(self) -> Mapping: - return { - "apiVersion": Poolboy.operator_api_version, - "kind": "ResourceHandle", - "name": self.name, - "namespace": self.namespace, - } + def parameter_values(self) -> Mapping: + return self.spec.get('provider', {}).get('parameterValues', {}) @property def resource_claim_name(self) -> Optional[str]: @@ -620,10 +660,18 @@ def resource_pool_namespace(self) -> Optional[str]: if 'resourcePool' in self.spec: return self.spec['resourcePool'].get('namespace', Poolboy.namespace) + @property + def resource_provider_name(self) -> Optional[str]: + return self.spec.get('provider', {}).get('name') + @property def resources(self) -> List[Mapping]: return self.spec.get('resources', []) + @property + def 
status_resources(self) -> List[Mapping]: + return self.status.get('resources', []) + @property def vars(self) -> Mapping: return self.spec.get('vars', {}) @@ -692,42 +740,6 @@ def get_lifespan_end_maximum_datetime(self, resource_claim=None): else: return maximum_end - def refresh(self, - annotations: kopf.Annotations, - labels: kopf.Labels, - meta: kopf.Meta, - spec: kopf.Spec, - status: kopf.Status, - uid: str, - ) -> None: - self.annotations = annotations - self.labels = labels - self.meta = meta - self.spec = spec - self.status = status - self.uid = uid - - def refresh_from_definition(self, definition: Mapping) -> None: - self.annotations = definition['metadata'].get('annotations', {}) - self.labels = definition['metadata'].get('labels', {}) - self.meta = definition['metadata'] - self.spec = definition['spec'] - self.status = definition.get('status', {}) - self.uid = definition['metadata']['uid'] - - async def delete(self): - try: - await Poolboy.custom_objects_api.delete_namespaced_custom_object( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourcehandles', - version = Poolboy.operator_version, - ) - except kubernetes_asyncio.client.exceptions.ApiException as e: - if e.status != 404: - raise - async def get_resource_claim(self) -> Optional[ResourceClaimT]: if not self.is_bound: return None @@ -741,7 +753,11 @@ async def get_resource_pool(self) -> Optional[ResourcePoolT]: return None return await resourcepool.ResourcePool.get(self.resource_pool_name) - async def get_resource_providers(self): + async def get_resource_provider(self) -> ResourceProviderT: + """Return ResourceProvider configured to manage ResourceHandle.""" + return await resourceprovider.ResourceProvider.get(self.resource_provider_name) + + async def get_resource_providers(self) -> List[ResourceProviderT]: resource_providers = [] for resource in self.spec.get('resources', []): resource_providers.append( @@ -750,41 +766,29 @@ async def 
get_resource_providers(self): return resource_providers async def get_resource_states(self, logger: kopf.ObjectLogger) -> List[Mapping]: - for i, resource in enumerate(self.spec['resources']): - if i >= len(self.resource_states): - self.resource_states.append(None) - self.resource_refresh_datetime.append(None) - elif ( - self.resource_states[i] and - self.resource_refresh_datetime[i] and - (datetime.now(timezone.utc) - self.resource_refresh_datetime[i]).total_seconds() > Poolboy.resource_refresh_interval - ): + resource_states = [] + for resource_index, resource in enumerate(self.resources): + if resource_index >= len(self.status_resources): + resource_states.append(None) continue - - reference = resource.get('reference') + reference = self.status_resources[resource_index].get('reference') if not reference: + resource_states.append(None) continue - api_version = reference['apiVersion'] kind = reference['kind'] name = reference['name'] namespace = reference.get('namespace') - try: - self.resource_states[i] = await poolboy_k8s.get_object( - api_version = api_version, - kind = kind, - name = name, - namespace = namespace, - ) - self.resource_refresh_datetime[i] = datetime.now(timezone.utc) - except kubernetes_asyncio.client.exceptions.ApiException as e: - if e.status == 404: - _name = f"{name} in {namespace}" if namespace else name - logger.warning(f"Mangaged resource {api_version} {kind} {_name} not found.") + resource = await resourcewatcher.ResourceWatcher.get_resource( + api_version=api_version, kind=kind, name=name, namespace=namespace, + ) + resource_states.append(resource) + if not resource: + if namespace: + logger.warning(f"Managed resource {api_version} {kind} {name} in {namespace} not found.") else: - raise - - return self.resource_states + logger.warning(f"Managed resource {api_version} {kind} {name} not found.") return resource_states async def handle_delete(self, logger: kopf.ObjectLogger) -> None: for resource in self.spec.get('resources', []): @@ 
-823,21 +827,10 @@ async def handle_delete(self, logger: kopf.ObjectLogger) -> None: async def handle_resource_event(self, logger: Union[logging.Logger, logging.LoggerAdapter], - resource_index: int, - resource_state: Mapping, ) -> None: async with self.lock: - # Extend resource_states as needed - if resource_index >= len(self.resource_states): - self.resource_states.extend( - [None] * (1 + resource_index - len(self.resource_states)) - ) - if resource_index >= len(self.resource_refresh_datetime): - self.resource_refresh_datetime.extend( - [None] * (1 + resource_index - len(self.resource_refresh_datetime)) - ) - self.resource_states[resource_index] = resource_state - self.resource_refresh_datetime[resource_index] = datetime.now(timezone.utc) + if self.has_resource_provider: + await self.update_status(logger=logger) async def manage(self, logger: kopf.ObjectLogger) -> None: async with self.lock: @@ -861,20 +854,54 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: await self.delete() return + await self.update_resources(logger=logger, resource_claim=resource_claim) + resource_providers = await self.get_resource_providers() resource_states = await self.get_resource_states(logger=logger) + status_resources = self.status_resources patch = [] + status_patch = [] resources_to_create = [] + if not self.status: + status_patch.append({ + "op": "add", + "path": "/status", + "value": {}, + }) + if 'resources' not in self.status: + status_patch.append({ + "op": "add", + "path": "/status/resources", + "value": [], + }) + for resource_index, resource in enumerate(self.spec['resources']): resource_provider = resource_providers[resource_index] resource_state = resource_states[resource_index] + if len(status_resources) <= resource_index: + status_resources.append({}) + status_patch.append({ + "op": "add", + "path": f"/status/resources/{resource_index}", + "value": {}, + }) + status_resource = status_resources[resource_index] + + if 'name' in resource and resource['name'] 
!= status_resource.get('name'): + status_resource['name'] = resource['name'] + status_patch.append({ + "op": "add", + "path": f"/status/resources/{resource_index}/name", + "value": resource['name'], + }) + if resource_provider.resource_requires_claim and not resource_claim: - if 'ResourceClaim' != resource.get('waitingFor'): - patch.append({ + if 'ResourceClaim' != status_resource.get('waitingFor'): + status_patch.append({ "op": "add", - "path": f"/spec/resources/{resource_index}/waitingFor", + "path": f"/status/resources/{resource_index}/waitingFor", "value": "ResourceClaim", }) continue @@ -916,10 +943,10 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: ) if wait_for_linked_provider: - if 'Linked ResourceProvider' != resource.get('waitingFor'): - patch.append({ + if 'Linked ResourceProvider' != status_resource.get('waitingFor'): + status_patch.append({ "op": "add", - "path": f"/spec/resources/{resource_index}/waitingFor", + "path": f"/status/resources/{resource_index}/waitingFor", "value": "Linked ResourceProvider", }) continue @@ -933,10 +960,10 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: vars_ = vars_, ) if not resource_definition: - if 'Resource Definition' != resource.get('waitingFor'): - patch.append({ + if 'Resource Definition' != status_resource.get('waitingFor'): + status_patch.append({ "op": "add", - "path": f"/spec/resources/{resource_index}/waitingFor", + "path": f"/status/resources/{resource_index}/waitingFor", "value": "Resource Definition", }) continue @@ -954,19 +981,29 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: if resource_namespace: reference['namespace'] = resource_namespace - if 'reference' not in resource: + if 'reference' not in status_resource: + # Add reference to status resources + status_resource['reference'] = reference + status_patch.append({ + "op": "add", + "path": f"/status/resources/{resource_index}/reference", + "value": reference, + }) + # Retain reference in spec for compatibility 
patch.append({ "op": "add", "path": f"/spec/resources/{resource_index}/reference", "value": reference, }) - if 'waitingFor' in resource: - patch.append({ + # Remove waitingFor from status if present as we are proceeding to resource creation + if 'waitingFor' in status_resource: + status_patch.append({ "op": "remove", - "path": f"/spec/resources/{resource_index}/waitingFor", + "path": f"/status/resources/{resource_index}/waitingFor", }) try: - resource_states[resource_index] = resource_state = await poolboy_k8s.get_object( + # Get resource state as it would not have been fetched above. + resource_states[resource_index] = resource_state = await resourcewatcher.ResourceWatcher.get_resource( api_version = resource_api_version, kind = resource_kind, name = resource_name, @@ -975,25 +1012,25 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: except kubernetes_asyncio.client.exceptions.ApiException as e: if e.status != 404: raise - elif resource_api_version != resource['reference']['apiVersion']: + elif resource_api_version != status_resource['reference']['apiVersion']: raise kopf.TemporaryError( f"ResourceHandle {self.name} would change from apiVersion " - f"{resource['reference']['apiVersion']} to {resource_api_version}!", + f"{status_resource['reference']['apiVersion']} to {resource_api_version}!", delay=600 ) - elif resource_kind != resource['reference']['kind']: + elif resource_kind != status_resource['reference']['kind']: raise kopf.TemporaryError( f"ResourceHandle {self.name} would change from kind " - f"{resource['reference']['kind']} to {resource_kind}!", + f"{status_resource['reference']['kind']} to {resource_kind}!", delay=600 ) else: # Maintain name and namespace - if resource_name != resource['reference']['name']: - resource_name = resource['reference']['name'] + if resource_name != status_resource['reference']['name']: + resource_name = status_resource['reference']['name'] resource_definition['metadata']['name'] = resource_name - if 
resource_namespace != resource['reference']['namespace']: - resource_namespace = resource['reference']['namespace'] + if resource_namespace != status_resource['reference'].get('namespace'): + resource_namespace = status_resource['reference']['namespace'] resource_definition['metadata']['namespace'] = resource_namespace resource_description = f"{resource_api_version} {resource_kind} {resource_name}" @@ -1019,16 +1056,9 @@ async def manage(self, logger: kopf.ObjectLogger) -> None: resources_to_create.append(resource_definition) if patch: - definition = await Poolboy.custom_objects_api.patch_namespaced_custom_object( - group = Poolboy.operator_domain, - name = self.name, - namespace = self.namespace, - plural = 'resourcehandles', - version = Poolboy.operator_version, - _content_type = 'application/json-patch+json', - body = patch, - ) - self.refresh_from_definition(definition) + await self.json_patch(patch) + if status_patch: + await self.json_patch_status(status_patch) if resource_claim: await resource_claim.update_status_from_handle(logger=logger, resource_handle=self) @@ -1047,7 +1077,215 @@ async def refetch(self) -> Optional[ResourceHandleT]: return self except kubernetes_asyncio.client.exceptions.ApiException as e: if e.status == 404: - ResourceHandle.unregister(name=self.name) + self.unregister(name=self.name) return None else: raise + + async def update_resources(self, + logger: kopf.ObjectLogger, + resource_claim: ResourceClaimT, + ): + # If no spec.provider then resources list is directly configured in the ResourceHandle + if 'provider' not in self.spec: + return + + try: + provider = await resourceprovider.ResourceProvider.get(self.resource_provider_name) + except kubernetes_asyncio.client.exceptions.ApiException as exception: + if exception.status == 404: + logger.warning(f"Missing ResourceProvider {self.resource_provider_name} to get resources for {self}") + return + else: + raise + + resources = await provider.get_resources( + resource_claim = 
resource_claim, + resource_handle = self, + ) + + if not 'resources' in self.spec: + await self.json_patch([{ + "op": "add", + "path": "/spec/resources", + "value": resources, + }]) + return + + patch = [] + for idx, resource in enumerate(resources): + if idx < len(self.spec['resources']): + current_provider = self.spec['resources'][idx]['provider']['name'] + updated_provider = resource['provider']['name'] + if current_provider != updated_provider: + logger.warning( + f"Refusing update resources in {self} as it would change " + f"ResourceProvider from {current_provider} to {updated_provider}" + ) + current_template = self.spec['resources'][idx].get('template') + updated_template = resource.get('template') + if current_template != updated_template: + patch.append({ + "op": "add", + "path": f"/spec/resources/{idx}/template", + "value": updated_template, + }) + else: + patch.append({ + "op": "add", + "path": f"/spec/resources/{idx}", + "value": resource + }) + + if patch: + await self.json_patch(patch) + logger.info(f"Updated resources for {self} from {provider}") + + async def update_status(self, + logger: kopf.ObjectLogger, + ) -> None: + patch = [] + if not self.status: + patch.append({ + "op": "add", + "path": "/status", + "value": {}, + }) + if not 'resources' in self.status: + patch.append({ + "op": "add", + "path": "/status/resources", + "value": [], + }) + + resources = deepcopy(self.resources) + resource_states = await self.get_resource_states(logger=logger) + for idx, state in enumerate(resource_states): + resources[idx]['state'] = state + if len(self.status_resources) < idx: + patch.append({ + "op": "add", + "path": f"/status/resources/{idx}", + "value": {}, + }) + + overall_ready = True + overall_healthy = True + + for idx, resource in enumerate(resources): + if resource['state']: + resource_provider = await resourceprovider.ResourceProvider.get(resource['provider']['name']) + resource_healthy = resource_provider.check_health( + logger = logger, + 
resource_handle = self, + resource_state = resource['state'], + ) + if resource_healthy: + resource_ready = resource_provider.check_readiness( + logger = logger, + resource_handle = self, + resource_state = resource['state'], + ) + else: + resource_ready = False + else: + resource_healthy = None + resource_ready = False + + # If the resource is not healthy then it is overall unhealthy. + # If the resource health is unknown then the overall health is unknown unless it is unhealthy. + if resource_healthy == False: + overall_healthy = False + elif resource_healthy == None: + if overall_healthy: + overall_healthy = None + + if resource_ready == False: + overall_ready = False + elif resource_ready == None: + if overall_ready: + overall_ready = None + + if len(self.status_resources) <= idx: + if resource_healthy != None: + patch.append({ + "op": "add", + "path": f"/status/resources/{idx}/healthy", + "value": resource_healthy, + }) + if resource_ready != None: + patch.append({ + "op": "add", + "path": f"/status/resources/{idx}/ready", + "value": resource_ready, + }) + else: + if resource_healthy == None: + if 'healthy' in self.status_resources[idx]: + patch.append({ + "op": "remove", + "path": f"/status/resources/{idx}/healthy", + }) + elif resource_healthy != self.status_resources[idx].get('healthy'): + patch.append({ + "op": "add", + "path": f"/status/resources/{idx}/healthy", + "value": resource_healthy, + }) + if resource_ready == None: + if 'ready' in self.status_resources[idx]: + patch.append({ + "op": "remove", + "path": f"/status/resources/{idx}/ready", + }) + elif resource_ready != self.status_resources[idx].get('ready'): + patch.append({ + "op": "add", + "path": f"/status/resources/{idx}/ready", + "value": resource_ready, + }) + + if overall_healthy == None: + if 'healthy' in self.status: + patch.append({ + "op": "remove", + "path": f"/status/healthy", + }) + elif overall_healthy != self.status.get('healthy'): + patch.append({ + "op": "add", + "path": 
f"/status/healthy", + "value": overall_healthy, + }) + + if overall_ready == None: + if 'ready' in self.status: + patch.append({ + "op": "remove", + "path": f"/status/ready", + }) + elif overall_ready != self.status.get('ready'): + patch.append({ + "op": "add", + "path": f"/status/ready", + "value": overall_ready, + }) + + resource_provider = await self.get_resource_provider() + if resource_provider.status_summary_template: + try: + status_summary = resource_provider.make_status_summary( + resource_handle=self, + resources=resources, + ) + if status_summary != self.status.get('summary'): + patch.append({ + "op": "add", + "path": "/status/summary", + "value": status_summary, + }) + except Exception: + logger.exception(f"Failed to generate status summary for {self}") + + if patch: + await self.json_patch_status(patch) diff --git a/operator/resourcepool.py b/operator/resourcepool.py index e577680..da1812e 100644 --- a/operator/resourcepool.py +++ b/operator/resourcepool.py @@ -7,23 +7,32 @@ from typing import List, Mapping, Optional, TypeVar import resourcehandle +import resourceprovider +from kopfobject import KopfObject from poolboy import Poolboy ResourceHandleT = TypeVar('ResourceHandleT', bound='ResourceHandle') ResourcePoolT = TypeVar('ResourcePoolT', bound='ResourcePool') +ResourceProviderT = TypeVar('ResourceProviderT', bound='ResourceProvider') + +class ResourcePool(KopfObject): + api_group = Poolboy.operator_domain + api_version = Poolboy.operator_version + kind = "ResourcePool" + plural = "resourcepools" -class ResourcePool: instances = {} - lock = asyncio.Lock() + class_lock = asyncio.Lock() - @staticmethod - async def get(name: str) -> ResourcePoolT: - async with ResourcePool.lock: - return ResourcePool.instances.get(name) + @classmethod + async def get(cls, name: str) -> ResourcePoolT: + async with cls.class_lock: + return cls.instances.get(name) - @staticmethod + @classmethod async def register( + cls, annotations: kopf.Annotations, labels: kopf.Labels, 
meta: kopf.Meta, @@ -33,8 +42,8 @@ async def register( status: kopf.Status, uid: str, ) -> ResourcePoolT: - async with ResourcePool.lock: - resource_pool = ResourcePool.instances.get(name) + async with cls.class_lock: + resource_pool = cls.instances.get(name) if resource_pool: resource_pool.refresh( annotations = annotations, @@ -45,7 +54,7 @@ async def register( uid = uid, ) else: - resource_pool = ResourcePool( + resource_pool = cls( annotations = annotations, labels = labels, meta = meta, @@ -58,35 +67,24 @@ async def register( resource_pool.__register() return resource_pool - @staticmethod - async def unregister(name: str) -> Optional[ResourcePoolT]: - async with ResourcePool.lock: - return ResourcePool.instances.pop(name, None) + @classmethod + async def unregister(cls, name: str) -> Optional[ResourcePoolT]: + async with cls.class_lock: + return cls.instances.pop(name, None) - def __init__(self, - annotations: kopf.Annotations, - labels: kopf.Labels, - meta: kopf.Meta, - name: str, - namespace: str, - spec: kopf.Spec, - status: kopf.Status, - uid: str, - ): - self.annotations = annotations - self.labels = labels - self.lock = asyncio.Lock() - self.meta = meta - self.name = name - self.namespace = namespace - self.spec = spec - self.status = status - self.uid = uid + @property + def delete_unhealthy_resource_handles(self) -> bool: + return self.spec.get('deleteUnhealthyResourceHandles', False) @property def has_lifespan(self) -> bool: return 'lifespan' in self.spec + @property + def has_resource_provider(self) -> bool: + """Return whether ResourceHandles for this pool are managed by a ResourceProvider.""" + return 'provider' in self.spec + @property def lifespan_default(self) -> int: return self.spec.get('lifespan', {}).get('default') @@ -116,21 +114,16 @@ def lifespan_unclaimed_timedelta(self): return timedelta(seconds=seconds) @property - def metadata(self) -> Mapping: - return self.meta + def max_unready(self) -> Optional[int]: + return 
self.spec.get('maxUnready') @property def min_available(self) -> int: return self.spec.get('minAvailable', 0) @property - def ref(self) -> Mapping: - return { - "apiVersion": Poolboy.operator_api_version, - "kind": "ResourcePool", - "name": self.name, - "namespace": self.namespace, - } + def resource_provider_name(self) -> Optional[str]: + return self.spec.get('provider', {}).get('name') @property def resources(self) -> List[Mapping]: @@ -141,25 +134,14 @@ def vars(self) -> Mapping: return self.spec.get('vars', {}) def __register(self) -> None: - ResourcePool.instances[self.name] = self + self.instances[self.name] = self def __unregister(self) -> None: - ResourcePool.instances.pop(self.name, None) + self.instances.pop(self.name, None) - def refresh(self, - annotations: kopf.Annotations, - labels: kopf.Labels, - meta: kopf.Meta, - spec: kopf.Spec, - status: kopf.Status, - uid: str, - ) -> None: - self.annotations = annotations - self.labels = labels - self.meta = meta - self.spec = spec - self.status = status - self.uid = uid + async def get_resource_provider(self) -> ResourceProviderT: + """Return ResourceProvider configured to manage ResourceHandle.""" + return await resourceprovider.ResourceProvider.get(self.resource_provider_name) async def handle_delete(self, logger: kopf.ObjectLogger): await resourcehandle.ResourceHandle.delete_unbound_handles_for_pool(logger=logger, resource_pool=self) @@ -167,11 +149,65 @@ async def handle_delete(self, logger: kopf.ObjectLogger): async def manage(self, logger: kopf.ObjectLogger): async with self.lock: resource_handles = await resourcehandle.ResourceHandle.get_unbound_handles_for_pool(resource_pool=self, logger=logger) - resource_handle_deficit = self.min_available - len(resource_handles) - if resource_handle_deficit <= 0: - return - for i in range(resource_handle_deficit): - resource_handle = await resourcehandle.ResourceHandle.create_for_pool( - logger=logger, - resource_pool=self - ) + available_resource_handles = [] + 
ready_resource_handles = [] + resource_handles_for_status = [] + for resource_handle in resource_handles: + if self.delete_unhealthy_resource_handles and resource_handle.is_healthy == False: + logger.info(f"Deleting {resource_handle} in {self} due to failed health check") + await resource_handle.delete() + continue + available_resource_handles.append(resource_handle) + if resource_handle.is_ready: + ready_resource_handles.append(resource_handle) + resource_handles_for_status.append({ + "healthy": resource_handle.is_healthy, + "name": resource_handle.name, + "ready": resource_handle.is_ready, + }) + + resource_handle_deficit = self.min_available - len(available_resource_handles) + + if self.max_unready != None: + unready_count = len(available_resource_handles) - len(ready_resource_handles) + if resource_handle_deficit > self.max_unready - unready_count: + resource_handle_deficit = self.max_unready - unready_count + + if resource_handle_deficit > 0: + for i in range(resource_handle_deficit): + resource_handle = await resourcehandle.ResourceHandle.create_for_pool( + logger=logger, + resource_pool=self + ) + resource_handles_for_status.append({ + "name": resource_handle.name, + }) + + patch = [] + if not self.status: + patch.append({ + "op": "add", + "path": "/status", + "value": {}, + }) + + if self.status.get('resourceHandles') != resource_handles_for_status: + patch.append({ + "op": "add", + "path": "/status/resourceHandles", + "value": resource_handles_for_status, + }) + + resource_handle_count = { + "available": len(available_resource_handles), + "ready": len(ready_resource_handles), + } + if self.status.get('resourceHandleCount') != resource_handle_count: + patch.append({ + "op": "add", + "path": "/status/resourceHandleCount", + "value": resource_handle_count, + }) + + if patch: + await self.json_patch_status(patch) diff --git a/operator/resourceprovider.py b/operator/resourceprovider.py index 319fd40..26f6397 100644 --- a/operator/resourceprovider.py +++ 
b/operator/resourceprovider.py @@ -16,7 +16,7 @@ from deep_merge import deep_merge from jsonpatch_from_diff import jsonpatch_from_diff from poolboy import Poolboy -from poolboy_templating import recursive_process_template_strings +from poolboy_templating import check_condition, recursive_process_template_strings ResourceClaimT = TypeVar('ResourceClaimT', bound='ResourceClaim') ResourceHandleT = TypeVar('ResourceHandleT', bound='ResourceHandle') @@ -118,22 +118,22 @@ class ResourceProvider: instances = {} lock = asyncio.Lock() - @staticmethod - def __register_definition(definition: Mapping) -> ResourceProviderT: + @classmethod + def __register_definition(cls, definition: Mapping) -> ResourceProviderT: name = definition['metadata']['name'] - resource_provider = ResourceProvider.instances.get(name) + resource_provider = cls.instances.get(name) if resource_provider: resource_provider.definition = definition self.__init_resource_template_validator() else: - resource_provider = ResourceProvider(definition=definition) - ResourceProvider.instances[name] = resource_provider + resource_provider = cls(definition=definition) + cls.instances[name] = resource_provider return resource_provider - @staticmethod - def find_provider_by_template_match(template: Mapping) -> ResourceProviderT: + @classmethod + def find_provider_by_template_match(cls, template: Mapping) -> ResourceProviderT: provider_matches = [] - for provider in ResourceProvider.instances.values(): + for provider in cls.instances.values(): if provider.is_match_for_template(template): provider_matches.append(provider) if len(provider_matches) == 0: @@ -143,52 +143,59 @@ def find_provider_by_template_match(template: Mapping) -> ResourceProviderT: else: raise kopf.TemporaryError(f"Resource template matches multiple ResourceProviders", delay=600) - @staticmethod - async def get(name: str) -> ResourceProviderT: - async with ResourceProvider.lock: - resource_provider = ResourceProvider.instances.get(name) + @classmethod + 
async def get(cls, name: str) -> ResourceProviderT: + async with cls.lock: + resource_provider = cls.instances.get(name) if resource_provider: return resource_provider definition = await Poolboy.custom_objects_api.get_cluster_custom_object( - Poolboy.operator_domain, Poolboy.operator_version, 'resourceproviders', name + group = Poolboy.operator_domain, + name = name, + namespace = Poolboy.namespace, + plural = 'resourceproviders', + version = Poolboy.operator_version, ) - return ResourceProvider.__register_definition(definition=definition) + return cls.__register_definition(definition=definition) - @staticmethod - async def preload(logger: kopf.ObjectLogger) -> None: - async with ResourceProvider.lock: + @classmethod + async def preload(cls, logger: kopf.ObjectLogger) -> None: + async with cls.lock: _continue = None while True: resource_provider_list = await Poolboy.custom_objects_api.list_namespaced_custom_object( - Poolboy.operator_domain, Poolboy.operator_version, Poolboy.namespace, 'resourceproviders', + group = Poolboy.operator_domain, + namespace = Poolboy.namespace, + plural = 'resourceproviders', + version = Poolboy.operator_version, _continue = _continue, limit = 50, ) for definition in resource_provider_list['items']: - ResourceProvider.__register_definition(definition=definition) + cls.__register_definition(definition=definition) _continue = resource_provider_list['metadata'].get('continue') if not _continue: break - @staticmethod - async def register(definition: Mapping, logger: kopf.ObjectLogger) -> ResourceProviderT: - async with ResourceProvider.lock: + @classmethod + async def register(cls, definition: Mapping, logger: kopf.ObjectLogger) -> ResourceProviderT: + async with cls.lock: name = definition['metadata']['name'] - resource_provider = ResourceProvider.instances.get(name) + resource_provider = cls.instances.get(name) if resource_provider: resource_provider.__init__(definition=definition) logger.info(f"Refreshed definition of ResourceProvider 
{name}") else: - resource_provider = ResourceProvider.__register_definition(definition=definition) + resource_provider = cls.__register_definition(definition=definition) logger.info(f"Registered ResourceProvider {name}") return resource_provider - @staticmethod - async def unregister(name: str, logger: kopf.ObjectLogger) -> Optional[ResourceProviderT]: - async with ResourceProvider.lock: - if name in ResourceProvider.instances: + @classmethod + async def unregister(cls, name: str, logger: kopf.ObjectLogger) -> Optional[ResourceProviderT]: + async with cls.lock: + if name in cls.instances: logger.info(f"Unregistered ResourceProvider {name}") - return ResourceProvider.instances.pop(name) + return cls.instances.pop(name) def __init__(self, definition: Mapping) -> None: self.meta = definition['metadata'] @@ -217,12 +224,20 @@ def approval_required(self) -> bool: def create_disabled(self) -> bool: return self.spec.get('disableCreation', False) + @property + def has_lifespan(self) -> bool: + return 'lifespan' in self.spec + @property def has_template_definition(self) -> bool: return 'override' in self.spec or ( 'template' in self.spec and 'definition' in self.spec['template'] ) + @property + def lifespan_default(self) -> int: + return self.spec.get('lifespan', {}).get('default') + @property def lifespan_maximum(self) -> Optional[str]: return self.spec.get('lifespan', {}).get('maximum') @@ -231,6 +246,22 @@ def lifespan_maximum(self) -> Optional[str]: def lifespan_relative_maximum(self) -> Optional[str]: return self.spec.get('lifespan', {}).get('relativeMaximum') + @property + def lifespan_unclaimed(self) -> int: + return self.spec.get('lifespan', {}).get('unclaimed') + + @property + def lifespan_unclaimed_seconds(self) -> int: + interval = self.lifespan_unclaimed + if interval: + return pytimeparse.parse(interval) + + @property + def lifespan_unclaimed_timedelta(self): + seconds = self.lifespan_unclaimed_seconds + if seconds: + return timedelta(seconds=seconds) + 
@property def linked_resource_providers(self) -> List[ResourceProviderT]: return [ @@ -370,6 +401,44 @@ def as_reference(self) -> Mapping: namespace = self.namespace, ) + def check_health(self, + logger: kopf.ObjectLogger, + resource_handle: ResourceHandleT, + resource_state: Mapping, + ) -> Optional[bool]: + if 'healthCheck' not in self.spec: + return None + try: + return check_condition( + condition = self.spec['healthCheck'], + variables = { + **resource_state, + "resource_handle": resource_handle, + }, + ) + except Exception: + logger.exception(f"Failed health check on {resource_handle} with {self}") + return None + + def check_readiness(self, + logger: kopf.ObjectLogger, + resource_handle: ResourceHandleT, + resource_state: Mapping, + ) -> Optional[bool]: + if 'readinessCheck' not in self.spec: + return None + try: + return check_condition( + condition = self.spec['readinessCheck'], + variables = { + **resource_state, + "resource_handle": resource_handle, + }, + ) + except Exception: + logger.exception(f"Failed readiness check on {resource_handle} with {self}") + return None + def check_template_match(self, claim_resource_template: Mapping, handle_resource_template: Mapping, @@ -408,20 +477,21 @@ def get_parameters(self) -> List[_Parameter]: _Parameter(pd) for pd in self.spec.get('parameters', []) ] - async def get_claim_resources(self, - resource_claim: ResourceClaimT, + async def get_resources(self, parameter_values: Optional[Mapping] = None, + resource_claim: Optional[ResourceClaimT] = None, resource_handle: Optional[ResourceHandleT] = None, resource_name: Optional[str] = None, ) -> List[Mapping]: - """Return list of resources for managed ResourceClaim""" + """Return list of resources for ResourceClaim and/or ResourceHandle""" resources = [] if parameter_values == None: - parameter_values = { - **self.parameter_defaults, - **resource_claim.parameter_values, - } + parameter_values = {**self.parameter_defaults} + if resource_claim: + 
parameter_values.update(resource_claim.parameter_values) + elif resource_handle: + parameter_values.update(resource_handle.parameter_values) resource_handle_vars = resource_handle.vars if resource_handle else {} vars_ = { @@ -435,9 +505,9 @@ async def get_claim_resources(self, resources = [] for linked_resource_provider in self.linked_resource_providers: - resource_provider = await ResourceProvider.get(linked_resource_provider.name) + resource_provider = await self.get(linked_resource_provider.name) resources.extend( - await resource_provider.get_claim_resources( + await resource_provider.get_resources( resource_claim = resource_claim, resource_handle = resource_handle, resource_name = linked_resource_provider.resource_name, @@ -473,17 +543,23 @@ def is_match_for_template(self, template: Mapping) -> bool: return template == cmp_template def make_status_summary(self, - resource_claim: ResourceClaimT, + resource_claim: Optional[ResourceClaimT] = None, + resource_handle: Optional[ResourceHandleT] = None, + resources: List[Mapping] = [], ) -> Mapping: + variables = {**self.vars} + if resource_claim: + variables.update(resource_claim.parameter_values) + else: + variables.update(resource_handle.parameter_values) + + variables['resource_claim'] = resource_claim + variables['resource_handle'] = resource_handle + variables['resources'] = resources + return recursive_process_template_strings( self.status_summary_template, - variables = { - **self.vars, - **resource_claim.parameter_values, - "resource_claim": resource_claim, - "resource_provider": self, - "resources": resource_claim.status_resources, - } + variables = variables, ) def processed_template(self, diff --git a/operator/resourcewatcher.py b/operator/resourcewatcher.py index dedfdb7..04aa503 100644 --- a/operator/resourcewatcher.py +++ b/operator/resourcewatcher.py @@ -4,7 +4,7 @@ import logging from datetime import datetime, timezone -from typing import Mapping, Optional +from typing import Mapping, Optional, 
TypeVar import poolboy_k8s import resourceclaim @@ -25,32 +25,77 @@ class ResourceWatchFailedError(Exception): class ResourceWatchRestartError(Exception): pass +ResourceWatcherT = TypeVar('ResourceWatcherT', bound='ResourceWatcher') + class ResourceWatcher: instances = {} - lock = asyncio.Lock() + class_lock = asyncio.Lock() + + class CacheEntry: + def __init__(self, resource): + self.resource = resource + self.cache_datetime = datetime.now(timezone.utc) + + @property + def is_expired(self): + return (datetime.now(timezone.utc) - self.cache_datetime).total_seconds() > Poolboy.resource_refresh_interval + + @classmethod + def get_watcher(cls, + api_version: str, + kind: str, + namespace: Optional[str] = None, + ) -> Optional[ResourceWatcherT]: + key = (api_version, kind, namespace) if namespace else (api_version, kind) + return ResourceWatcher.instances.get(key) + + @classmethod + async def get_resource(cls, + api_version: str, + kind: str, + name: str, + namespace: Optional[str] = None, + ) -> Optional[Mapping]: + watcher = cls.get_watcher(api_version=api_version, kind=kind, namespace=namespace) + if watcher: + cache_entry = watcher.cache.get(name) + if cache_entry and not cache_entry.is_expired: + return cache_entry.resource + try: + resource = await poolboy_k8s.get_object(api_version=api_version, kind=kind, name=name, namespace=namespace) + if resource and watcher: + watcher.cache[name] = ResourceWatcher.CacheEntry(resource) + return resource + except kubernetes_asyncio.client.exceptions.ApiException as exception: + if exception.status == 404: + return None + else: + raise - async def start_resource_watch( + @classmethod + async def start_resource_watch(cls, api_version: str, kind: str, namespace: str, ) -> None: key = (api_version, kind, namespace) if namespace else (api_version, kind) - async with ResourceWatcher.lock: - resource_watcher = ResourceWatcher.instances.get(key) + async with cls.class_lock: + resource_watcher = cls.instances.get(key) if 
resource_watcher: return - resource_watcher = ResourceWatcher( + resource_watcher = cls( api_version = api_version, kind = kind, namespace = namespace, ) - ResourceWatcher.instances[key] = resource_watcher + cls.instances[key] = resource_watcher resource_watcher.start() - async def stop_all(): - async with ResourceWatcher.lock: + @classmethod + async def stop_all(cls): + async with cls.class_lock: tasks = [] - for resource_watcher in ResourceWatcher.instances.values(): + for resource_watcher in cls.instances.values(): resource_watcher.cancel() tasks.append(resource_watcher.task) await asyncio.gather(*tasks) @@ -133,6 +178,7 @@ async def watch(self): async def __watch(self, method, **kwargs): watch = None + self.cache.clear() try: _continue = None while True: @@ -163,9 +209,15 @@ async def __watch(self, method, **kwargs): else: raise ResourceWatchFailedError(f"UNKNOWN EVENT: {event}") + name = event_obj['metadata']['name'] + if event_type == 'DELETED': + self.cache.pop(name, None) + else: + self.cache[name] = self.CacheEntry(event_obj) + await self.__watch_event(event_type=event_type, event_obj=event_obj) - except kubernetes_asyncio.client.exceptions.ApiException as e: - if e.status == 410: + except kubernetes_asyncio.client.exceptions.ApiException as exception: + if exception.status == 410: raise ResourceWatchRestartError("Received 410 expired response.") else: raise @@ -195,11 +247,7 @@ async def __watch_event(self, event_type, event_obj): resource_handle = resourcehandle.ResourceHandle.get_from_cache(name=resource_handle_name) if resource_handle: - await resource_handle.handle_resource_event( - logger = logger, - resource_index = resource_index, - resource_state = event_obj, - ) + await resource_handle.handle_resource_event(logger=logger) else: logger.debug( f"Received event for ResourceHandle {resource_handle_name} " @@ -246,7 +294,10 @@ async def __watch_event(self, event_type, event_obj): ) elif event_type == 'DELETED': if prev_state: - await 
resource_claim.remove_resource_from_status(resource_index) + await resource_claim.remove_resource_from_status( + index=resource_index, + logger=logger, + ) else: logger.info( f"Ignoring resource delete for {resource_claim_description} due to resource " @@ -254,15 +305,19 @@ async def __watch_event(self, event_type, event_obj): ) else: logger.debug(f"Updating {resource_description} in {resource_claim_description}") - await resource_claim.update_resource_in_status(resource_index, event_obj) - except kubernetes_asyncio.client.exceptions.ApiException as e: - if e.status != 404: + await resource_claim.update_resource_in_status( + index=resource_index, + logger=logger, + state=event_obj, + ) + except kubernetes_asyncio.client.exceptions.ApiException as exception: + if exception.status != 404: logger.warning( - f"Received {e.status} response when attempting to patch resource state for " + f"Received {exception.status} response when attempting to patch resource state for " f"{event_type.lower()} {resource_description} for {resource_claim_description}: " - f"{e}" + f"{exception}" ) - except Exception as e: + except Exception: logger.exception( f"Exception when attempting to patch resource state for {event_type.lower()} resource " f"for {resource_claim_description}" diff --git a/test/roles/poolboy_test_simple/tasks/test-approval-01.yaml b/test/roles/poolboy_test_simple/tasks/test-approval-01.yaml index 4b6d214..9a4cccc 100644 --- a/test/roles/poolboy_test_simple/tasks/test-approval-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-approval-01.yaml @@ -80,7 +80,7 @@ status_patch: approval: state: approved - + - name: Verify handling of ResourceClaim test-approval-01 kubernetes.core.k8s_info: api_version: "{{ poolboy_domain }}/v1" diff --git a/test/roles/poolboy_test_simple/tasks/test-auto-delete-01.yaml b/test/roles/poolboy_test_simple/tasks/test-auto-delete-01.yaml index eeb823e..316bc0f 100644 --- a/test/roles/poolboy_test_simple/tasks/test-auto-delete-01.yaml +++ 
b/test/roles/poolboy_test_simple/tasks/test-auto-delete-01.yaml @@ -33,7 +33,7 @@ - pathMatch: /spec/.* allowedOps: - replace - + - name: Create ResourceClaim test-auto-delete-01-a kubernetes.core.k8s: definition: diff --git a/test/roles/poolboy_test_simple/tasks/test-auto-detach-01.yaml b/test/roles/poolboy_test_simple/tasks/test-auto-detach-01.yaml index 5f18dc7..d07935f 100644 --- a/test/roles/poolboy_test_simple/tasks/test-auto-detach-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-auto-detach-01.yaml @@ -33,7 +33,7 @@ - pathMatch: /spec/.* allowedOps: - replace - + - name: Create ResourceClaim test-auto-detach-01-a kubernetes.core.k8s: definition: diff --git a/test/roles/poolboy_test_simple/tasks/test-ignore-01.yaml b/test/roles/poolboy_test_simple/tasks/test-ignore-01.yaml index 743a4b0..afd803c 100644 --- a/test/roles/poolboy_test_simple/tasks/test-ignore-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-ignore-01.yaml @@ -223,7 +223,7 @@ apiVersion: "{{ poolboy_domain }}/v1" kind: ResourceClaim metadata: - annotations: + annotations: poolboy.dev.local/resource-claim-init-timestamp: "1970-01-01T00:00:00Z" finalizers: - "{{ poolboy_domain }}" @@ -417,7 +417,7 @@ spec: stringvalue: one ts: "1970-01-01T00:00:00Z" - + - name: Clear ignore label on ResourceHandle for test-ignore-01-a kubernetes.core.k8s: api_version: "{{ poolboy_domain }}/v1" diff --git a/test/roles/poolboy_test_simple/tasks/test-linked-01.yaml b/test/roles/poolboy_test_simple/tasks/test-linked-01.yaml index b88a647..7b40dfb 100644 --- a/test/roles/poolboy_test_simple/tasks/test-linked-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-linked-01.yaml @@ -179,7 +179,7 @@ __state: "{{ r_get_resource_handle.resources[0] }}" assert: that: - - __state.spec.resources[1].waitingFor == 'Linked ResourceProvider' + - __state.status.resources[1].waitingFor == 'Linked ResourceProvider' - name: Verify creation of ResourceClaimTest test-linked-01-a-base kubernetes.core.k8s_info: diff --git 
a/test/roles/poolboy_test_simple/tasks/test-parameters-01.yaml b/test/roles/poolboy_test_simple/tasks/test-parameters-01.yaml index e669111..472b516 100644 --- a/test/roles/poolboy_test_simple/tasks/test-parameters-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-parameters-01.yaml @@ -58,7 +58,7 @@ - pathMatch: /spec/.* allowedOps: - replace - + - name: Create ResourceClaim test-parameters-01-a kubernetes.core.k8s: definition: @@ -90,7 +90,7 @@ delay: 1 retries: 10 -- name: Save facts from for ResourceClaim test-parameters-01-a +- name: Set facts from ResourceClaim test-parameters-01-a vars: __name: >- {{ r_get_resource_claim.resources[0].status.resourceHandle.name }} @@ -107,6 +107,23 @@ - __state.metadata.annotations['test-annotation'] == 'test-annotation-value' - __state.metadata.labels['test-label'] == 'test-label-value' +- name: Get ResourceHandle for test-parameter-01-a + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_parameters_01_a_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 1 + +- name: Verify state of ResourceHandle for test-parameter-01-a + vars: + __state: "{{ r_get_resource_handle.resources[0] }}" + assert: + that: + - __state.spec.provider.name == 'test-parameters-01' + - __state.spec.provider.parameterValues == {"numbervar": 1} + - name: Verify creation of ResourceClaimTest test-parameters-01-a kubernetes.core.k8s_info: api_version: "{{ poolboy_domain }}/v1" @@ -140,6 +157,23 @@ stringvar: two numbervar: 2 +- name: Get ResourceHandle for test-parameter-01-a after update + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_parameters_01_a_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length 
!= 1 + +- name: Verify state of ResourceHandle for test-parameter-01-a after update + vars: + __state: "{{ r_get_resource_handle.resources[0] }}" + assert: + that: + - __state.spec.provider.name == 'test-parameters-01' + - '__state.spec.provider.parameterValues == {"numbervar": 2, "stringvar": "two"}' + - name: Verify update of ResourceClaimTest for test-parameters-01-a kubernetes.core.k8s_info: api_version: "{{ poolboy_domain }}/v1" diff --git a/test/roles/poolboy_test_simple/tasks/test-parameters-03.yaml b/test/roles/poolboy_test_simple/tasks/test-parameters-03.yaml index 8ff031e..07e5b87 100644 --- a/test/roles/poolboy_test_simple/tasks/test-parameters-03.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-parameters-03.yaml @@ -49,7 +49,7 @@ allowedOps: - add - replace - + - name: Create ResourceClaim test-parameters-03-a kubernetes.core.k8s: definition: diff --git a/test/roles/poolboy_test_simple/tasks/test-parameters-04.yaml b/test/roles/poolboy_test_simple/tasks/test-parameters-04.yaml index 943b5ca..7020887 100644 --- a/test/roles/poolboy_test_simple/tasks/test-parameters-04.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-parameters-04.yaml @@ -58,7 +58,7 @@ allowedOps: - add - replace - + - name: Create ResourceClaim test-parameters-04-a kubernetes.core.k8s: definition: diff --git a/test/roles/poolboy_test_simple/tasks/test-parameters-05.yaml b/test/roles/poolboy_test_simple/tasks/test-parameters-05.yaml index 7de8e72..e5c4e11 100644 --- a/test/roles/poolboy_test_simple/tasks/test-parameters-05.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-parameters-05.yaml @@ -53,7 +53,7 @@ allowedOps: - add - replace - + - name: Create ResourceClaim test-parameters-05-a kubernetes.core.k8s: definition: diff --git a/test/roles/poolboy_test_simple/tasks/test-pool-03.yaml b/test/roles/poolboy_test_simple/tasks/test-pool-03.yaml new file mode 100644 index 0000000..079b558 --- /dev/null +++ b/test/roles/poolboy_test_simple/tasks/test-pool-03.yaml @@ -0,0 +1,329 
@@ +--- +- name: Create ResourceProvider test-pool-03 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceProvider + metadata: + name: test-pool-03 + namespace: "{{ poolboy_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + override: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceClaimTest + metadata: + name: "test-pool-03-{% raw %}{{ guid }}{% endraw %}" + namespace: "{{ poolboy_test_namespace }}" + parameters: + - name: stringvar + allowUpdate: true + required: true + validation: + openAPIV3Schema: + type: string + default: one + enum: + - one + - two + - three + - name: numbervar + allowUpdate: true + validation: + openAPIV3Schema: + type: integer + default: 0 + minimum: 0 + template: + definition: + spec: + numbervalue: "{% raw %}{{ numbervar | int }}{% endraw %}" + stringvalue: "{% raw %}{{ stringvar }}{% endraw %}" + enable: true + updateFilters: + - pathMatch: /spec/.* + allowedOps: + - replace + +- name: Create ResourcePool test-pool-03 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourcePool + metadata: + name: test-pool-03 + namespace: "{{ poolboy_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + minAvailable: 2 + provider: + name: test-pool-03 + parameterValues: + numbervar: 1 + +- name: Verify ResourceHandles for test-pool-03 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-03" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 2 or + __unbound_handles[0].spec.resources is undefined or + __unbound_handles[0].spec.resources[0].reference is undefined + until: r_get_resource_handles is 
success + delay: 1 + retries: 10 + +- name: Set facts from for ResourcePool test-pool-03 ResourceHandles + vars: + __state: >- + {{ r_get_resource_handles.resources[0] }} + set_fact: + resource_claim_test_name: "{{ __state.spec.resources[0].reference.name }}" + +- name: Verify creation of ResourceClaimTest for test-pool-03 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaimTest + name: "{{ resource_claim_test_name }}" + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim_test + failed_when: r_get_resource_claim_test.resources | length != 1 + until: r_get_resource_claim_test is success + delay: 1 + retries: 10 + +- name: Verify state of ResourceClaimTest for test-pool-03 + vars: + __state: "{{ r_get_resource_claim_test.resources[0] }}" + assert: + that: + - __state.spec.numbervalue == 1 + - __state.spec.stringvalue == "one" + +- name: Create ResourceClaim test-pool-03 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + metadata: + name: test-pool-03 + namespace: "{{ poolboy_test_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + provider: + name: test-pool-03 + parameterValues: + numbervar: 1 + +- name: Verify handling of ResourceClaim test-pool-03 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03 + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + failed_when: >- + r_get_resource_claim.resources[0].status.resources[0].state is undefined + until: r_get_resource_claim is success + delay: 1 + retries: 10 + +- name: Save facts from for ResourceClaim test-pool-03 + set_fact: + resource_claim_test_pool_03_guid: >- + {{ r_get_resource_claim.resources[0].status.resourceHandle.name[-5:] }} + resource_claim_test_pool_03_resource_handle_name: >- + {{ r_get_resource_claim.resources[0].status.resourceHandle.name }} + +- name: Verify state of 
ResourceClaim test-pool-03 + vars: + __state: "{{ r_get_resource_claim.resources[0] }}" + assert: + that: + - __state.status.resources[0].state.metadata.name == 'test-pool-03-' ~ resource_claim_test_pool_03_guid + +- name: Verify ResourceHandles for test-pool-03 after bind + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-03" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 2 + +- name: Create ResourceClaim test-pool-03-no-parameters + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + metadata: + name: test-pool-03-no-parameters + namespace: "{{ poolboy_test_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + resources: + - name: test-pool-03 + provider: + apiVersion: poolboy.dev.local/v1 + kind: ResourceProvider + name: test-pool-03 + namespace: poolboy-dev + template: + spec: + numbervalue: 1 + stringvalue: one + +- name: Verify handling of ResourceClaim test-pool-03-no-parameters + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03-no-parameters + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + failed_when: >- + r_get_resource_claim.resources[0].status.resourceHandle is undefined + until: r_get_resource_claim is success + delay: 1 + retries: 10 + +- name: Save facts from for ResourceClaim test-pool-03-no-parameters + set_fact: + resource_claim_test_pool_03_no_parameters_resource_handle_name: >- + {{ r_get_resource_claim.resources[0].status.resourceHandle.name }} + +- name: Verify state of ResourceHandle for ResourceClaim test-pool-03-no-parameters + kubernetes.core.k8s_info: + 
api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ r_get_resource_claim.resources[0].status.resourceHandle.name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + +- name: Verify state of ResourceHandle for test-pool-03-no-parameters + vars: + __state: "{{ r_get_resource_handle.resources[0] }}" + assert: + that: + - __state.spec.provider is undefined + +- name: Delete ResourcePool test-pool-03 + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourcePool + name: test-pool-03 + namespace: "{{ poolboy_namespace }}" + state: absent + +- name: Verify cleanup of ResourceHandles for test-pool-03 after delete + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-03" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 0 + until: r_get_resource_handles is successful + retries: 5 + delay: 2 + +- name: Verify ResourceHandle for test-pool-03 not deleted along with pool + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + name: "{{ resource_claim_test_pool_03_resource_handle_name }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 1 + +- name: Delete ResourceClaim test-pool-03 + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03 + namespace: "{{ poolboy_test_namespace }}" + state: absent + +- name: Verify delete of ResourceClaim test-pool-03 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03 + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + 
failed_when: r_get_resource_claim.resources | length != 0 + until: r_get_resource_claim is success + retries: 5 + delay: 1 + +- name: Verify delete of ResourceHandle for test-pool-03 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_pool_03_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 0 + until: r_get_resource_handle is success + retries: 5 + delay: 1 + +- name: Delete ResourceClaim test-pool-03-no-parameters + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03-no-parameters + namespace: "{{ poolboy_test_namespace }}" + state: absent + +- name: Verify delete of ResourceClaim test-pool-03-no-parameters + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-03-no-parameters + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + failed_when: r_get_resource_claim.resources | length != 0 + until: r_get_resource_claim is success + retries: 5 + delay: 1 + +- name: Verify delete of ResourceHandle for test-pool-03-no-parameters + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_pool_03_no_parameters_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 0 + until: r_get_resource_handle is success + retries: 5 + delay: 1 diff --git a/test/roles/poolboy_test_simple/tasks/test-pool-04.yaml b/test/roles/poolboy_test_simple/tasks/test-pool-04.yaml new file mode 100644 index 0000000..0d0b42d --- /dev/null +++ b/test/roles/poolboy_test_simple/tasks/test-pool-04.yaml @@ -0,0 +1,218 @@ +--- +- name: Create ResourceProvider test-pool-04 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceProvider 
+ metadata: + name: test-pool-04 + namespace: "{{ poolboy_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + healthCheck: >- + spec.numbervalue > 0 + override: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceClaimTest + metadata: + name: "test-pool-04-{% raw %}{{ guid }}{% endraw %}" + namespace: "{{ poolboy_test_namespace }}" + parameters: + - name: numbervar + allowUpdate: true + validation: + openAPIV3Schema: + type: integer + default: 0 + minimum: 0 + readinessCheck: >- + spec.numbervalue > 1 + template: + definition: + spec: + numbervalue: "{% raw %}{{ numbervar | int }}{% endraw %}" + enable: true + updateFilters: + - pathMatch: /spec/.* + allowedOps: + - replace + +- name: Create ResourcePool test-pool-04 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourcePool + metadata: + name: test-pool-04 + namespace: "{{ poolboy_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + minAvailable: 1 + provider: + name: test-pool-04 + parameterValues: + numbervar: 0 + +- name: Verify ResourceHandle for test-pool-04 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-04" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 1 or + __unbound_handles[0].spec.resources is undefined or + __unbound_handles[0].spec.resources[0].reference is undefined or + __unbound_handles[0].status.ready != false or + __unbound_handles[0].status.healthy != false or + __unbound_handles[0].status.resources[0].ready != false or + __unbound_handles[0].status.resources[0].healthy != false + until: r_get_resource_handles is success + delay: 1 + retries: 10 + +- name: Set facts from for 
ResourcePool test-pool-04 ResourceHandles + vars: + __state: >- + {{ r_get_resource_handles.resources[0] }} + set_fact: + failed_resource_handle_name: "{{ __state.metadata.name }}" + resource_claim_test_name: "{{ __state.spec.resources[0].reference.name }}" + +- name: Create ResourceClaim test-pool-04 + kubernetes.core.k8s: + definition: + apiVersion: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + metadata: + name: test-pool-04 + namespace: "{{ poolboy_test_namespace }}" + labels: >- + {{ { + poolboy_domain ~ "/test": "simple" + } }} + spec: + provider: + name: test-pool-04 + parameterValues: + numbervar: 0 + +- name: Verify handling of ResourceClaim test-pool-04 does not bind to unhealthy ResourceHandle + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-04 + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + failed_when: >- + r_get_resource_claim.resources[0].status.resources[0].state is undefined or + r_get_resource_claim.resources[0].status.resourceHandle is undefined or + r_get_resource_claim.resources[0].status.resourceHandle.name == failed_resource_handle_name + until: r_get_resource_claim is success + delay: 1 + retries: 10 + +- name: Update ResourcePool test-pool-04 to create healthy but unready ResourceHandles + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourcePool + name: test-pool-04 + namespace: "{{ poolboy_namespace }}" + definition: + spec: + deleteUnhealthyResourceHandles: true + provider: + parameterValues: + numbervar: 1 + +- name: Verify that failed ResourceHandle is deleted + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ failed_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 0 + until: r_get_resource_handle is success + delay: 1 + retries: 10 + +- name: Verify new 
ResourceHandle for test-pool-04 is created + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-04" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 1 or + __unbound_handles[0].spec.resources is undefined or + __unbound_handles[0].spec.resources[0].reference is undefined or + __unbound_handles[0].status.ready != false or + __unbound_handles[0].status.healthy != true or + __unbound_handles[0].status.resources[0].ready != false or + __unbound_handles[0].status.resources[0].healthy != true + until: r_get_resource_handles is success + delay: 1 + retries: 10 + +- name: Delete ResourcePool test-pool-04 + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourcePool + name: test-pool-04 + namespace: "{{ poolboy_namespace }}" + state: absent + +- name: Verify cleanup of ResourceHandles for test-pool-04 after delete + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + namespace: "{{ poolboy_namespace }}" + label_selectors: + - "{{ poolboy_domain }}/resource-pool-name = test-pool-04" + register: r_get_resource_handles + vars: + __unbound_handles: >- + {{ r_get_resource_handles.resources | json_query('[?spec.resourceClaim==null]') }} + failed_when: >- + __unbound_handles | length != 0 + until: r_get_resource_handles is successful + retries: 5 + delay: 2 + +- name: Delete ResourceClaim test-pool-04 + kubernetes.core.k8s: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: test-pool-04 + namespace: "{{ poolboy_test_namespace }}" + state: absent + +- name: Verify delete of ResourceClaim test-pool-04 + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceClaim + name: 
test-pool-04 + namespace: "{{ poolboy_test_namespace }}" + register: r_get_resource_claim + failed_when: r_get_resource_claim.resources | length != 0 + until: r_get_resource_claim is success + retries: 5 + delay: 1 +... diff --git a/test/roles/poolboy_test_simple/tasks/test-requester-01.yaml b/test/roles/poolboy_test_simple/tasks/test-requester-01.yaml index b7e017e..e4c548c 100644 --- a/test/roles/poolboy_test_simple/tasks/test-requester-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-requester-01.yaml @@ -105,7 +105,7 @@ namespace: "{{ poolboy_namespace }}" register: r_get_resource_handle failed_when: >- - r_get_resource_handle.resources[0].spec.resources[0].waitingFor != 'ResourceClaim' + r_get_resource_handle.resources[0].status.resources[0].waitingFor != 'ResourceClaim' until: r_get_resource_handle is success delay: 1 retries: 10 diff --git a/test/roles/poolboy_test_simple/tasks/test-status-summary-01.yaml b/test/roles/poolboy_test_simple/tasks/test-status-summary-01.yaml index f66e3ce..40a1e14 100644 --- a/test/roles/poolboy_test_simple/tasks/test-status-summary-01.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-status-summary-01.yaml @@ -41,7 +41,7 @@ - pathMatch: /spec/.* allowedOps: - replace - + - name: Create ResourceClaim test-status-summary-01-a kubernetes.core.k8s: definition: @@ -88,6 +88,22 @@ - __state.status.resources[0].state.metadata.name == resource_claim_test_status_summary_01_a_resource_name - __state.status.summary.stringValue == 'one' +- name: Get ResourceHandle for test-status-summary-01-a + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_status_summary_01_a_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 1 + +- name: Verify state of ResourceHandle for test-status-summary-01-a + vars: + __state: "{{ r_get_resource_handle.resources[0] }}" + assert: 
+ that: + - '__state.status.summary == {"stringValue": "one"}' + - name: Update parameters of ResourceClaim test-status-summary-01-a kubernetes.core.k8s: api_version: "{{ poolboy_domain }}/v1" @@ -110,11 +126,26 @@ failed_when: >- r_get_resource_claim.resources[0].status.resources[0].state is undefined or r_get_resource_claim.resources[0].status.summary.stringValue != 'two' - until: r_get_resource_claim is success delay: 1 retries: 10 +- name: Get ResourceHandle for test-status-summary-01-a after update + kubernetes.core.k8s_info: + api_version: "{{ poolboy_domain }}/v1" + kind: ResourceHandle + name: "{{ resource_claim_test_status_summary_01_a_resource_handle_name }}" + namespace: "{{ poolboy_namespace }}" + register: r_get_resource_handle + failed_when: r_get_resource_handle.resources | length != 1 + +- name: Verify state of ResourceHandle for test-status-summary-01-a after update + vars: + __state: "{{ r_get_resource_handle.resources[0] }}" + assert: + that: + - '__state.status.summary == {"stringValue": "two"}' + - name: Delete ResourceClaim test-status-summary-01-a kubernetes.core.k8s: api_version: "{{ poolboy_domain }}/v1" diff --git a/test/roles/poolboy_test_simple/tasks/test-vars-02.yaml b/test/roles/poolboy_test_simple/tasks/test-vars-02.yaml index a5eb39c..b0edcda 100644 --- a/test/roles/poolboy_test_simple/tasks/test-vars-02.yaml +++ b/test/roles/poolboy_test_simple/tasks/test-vars-02.yaml @@ -166,7 +166,7 @@ name: test-vars-02-a spec: count: 10 - + - name: Verify template validation success for ResourceClaim kubernetes.core.k8s_info: api_version: "{{ poolboy_domain }}/v1" diff --git a/test/roles/poolboy_test_simple/tasks/test.yaml b/test/roles/poolboy_test_simple/tasks/test.yaml index ee7a701..2639a13 100644 --- a/test/roles/poolboy_test_simple/tasks/test.yaml +++ b/test/roles/poolboy_test_simple/tasks/test.yaml @@ -11,6 +11,8 @@ - test-linked-02.yaml - test-pool-01.yaml - test-pool-02.yaml + - test-pool-03.yaml + - test-pool-04.yaml - test-vars-01.yaml 
- test-vars-02.yaml - test-vars-03.yaml