diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 551b7f5d79..d4fbfaf839 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -24,8 +24,8 @@ A clear and concise description of what you expected to happen. * Version of Kubernetes * Kubernetes platform (e.g. Mini-kube or GCP) * Details on how you expose the NGINX Gateway Fabric Pod (e.g. Service of type LoadBalancer or port-forward) -* Logs of NGINX container: `kubectl -n nginx-gateway logs -l app=nginx-gateway -c nginx` -* NGINX Configuration: `kubectl -n nginx-gateway exec -c nginx -- nginx -T` +* Logs of NGINX container: `kubectl -n <namespace> logs deployments/<deployment-name>` +* NGINX Configuration: `kubectl -n <namespace> exec -it deployments/<deployment-name> -- nginx -T` **Additional context** Add any other context about the problem here. Any log files you want to share. diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml index 9d0fcda739..22b52ac0bd 100644 --- a/.github/workflows/conformance.yml +++ b/.github/workflows/conformance.yml @@ -76,13 +76,6 @@ jobs: type=ref,event=pr type=ref,event=branch,suffix=-rc,enable=${{ startsWith(github.ref, 'refs/heads/release') }} - - name: Generate static deployment - run: | - ngf_prefix=ghcr.io/nginx/nginx-gateway-fabric - ngf_tag=${{ steps.ngf-meta.outputs.version }} - make generate-static-deployment PLUS_ENABLED=${{ inputs.image == 'plus' && 'true' || 'false' }} PREFIX=${ngf_prefix} TAG=${ngf_tag} - working-directory: ./tests - - name: Build binary uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0 with: @@ -151,7 +144,6 @@ jobs: ngf_tag=${{ steps.ngf-meta.outputs.version }} if [ ${{ github.event_name }} == "schedule" ]; then export GW_API_VERSION=main; fi make helm-install-local${{ inputs.image == 'plus' && '-with-plus' || ''}} PREFIX=${ngf_prefix} TAG=${ngf_tag} - make deploy-updated-provisioner PREFIX=${ngf_prefix} TAG=${ngf_tag} working-directory: ./tests - name: Run 
conformance tests diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yml index fd0a791125..21c03b6d76 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yml @@ -176,4 +176,4 @@ jobs: --set=nginx.plus=${{ inputs.image == 'plus' }} \ --set=nginx.image.tag=nightly \ --set=nginxGateway.productTelemetry.enable=false \ - ${{ inputs.image == 'plus' && '--set=serviceAccount.imagePullSecret=nginx-plus-registry-secret --set=nginx.image.repository=private-registry.nginx.com/nginx-gateway-fabric/nginx-plus' || '' }}" + ${{ inputs.image == 'plus' && '--set=nginx.imagePullSecret=nginx-plus-registry-secret --set=nginx.image.repository=private-registry.nginx.com/nginx-gateway-fabric/nginx-plus' || '' }}" diff --git a/.github/workflows/nfr.yml b/.github/workflows/nfr.yml index 5eabd96b88..c8968cb056 100644 --- a/.github/workflows/nfr.yml +++ b/.github/workflows/nfr.yml @@ -92,6 +92,13 @@ jobs: workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY }} service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} + - name: Login to GAR + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + with: + registry: us-docker.pkg.dev + username: oauth2accesstoken + password: ${{ steps.auth.outputs.access_token }} + - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 with: diff --git a/.yamllint.yaml b/.yamllint.yaml index b2d07c848f..e52cae4940 100644 --- a/.yamllint.yaml +++ b/.yamllint.yaml @@ -2,8 +2,7 @@ ignore: - charts/nginx-gateway-fabric/templates - config/crd/bases/ - - deploy/crds.yaml - - deploy/*nginx-plus + - deploy - site/static rules: @@ -15,7 +14,9 @@ rules: require-starting-space: true ignore-shebangs: true min-spaces-from-content: 1 - comments-indentation: enable + comments-indentation: + ignore: | + charts/nginx-gateway-fabric/values.yaml document-end: disable document-start: disable empty-lines: enable diff --git a/Makefile b/Makefile index 
e7f758d95b..269fabd91c 100644 --- a/Makefile +++ b/Makefile @@ -226,13 +226,13 @@ install-ngf-local-build-with-plus: check-for-plus-usage-endpoint build-images-wi .PHONY: helm-install-local helm-install-local: install-gateway-crds ## Helm install NGF on configured kind cluster with local images. To build, load, and install with helm run make install-ngf-local-build. - helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PREFIX) --create-namespace --wait --set nginxGateway.image.pullPolicy=Never --set service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway $(HELM_PARAMETERS) + helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PREFIX) --create-namespace --wait --set nginxGateway.image.pullPolicy=Never --set nginx.service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway $(HELM_PARAMETERS) .PHONY: helm-install-local-with-plus helm-install-local-with-plus: check-for-plus-usage-endpoint install-gateway-crds ## Helm install NGF with NGINX Plus on configured kind cluster with local images. To build, load, and install with helm run make install-ngf-local-build-with-plus. 
kubectl create namespace nginx-gateway || true kubectl -n nginx-gateway create secret generic nplus-license --from-file $(PLUS_LICENSE_FILE) || true - helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PLUS_PREFIX) --wait --set nginxGateway.image.pullPolicy=Never --set service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway --set nginx.plus=true --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) $(HELM_PARAMETERS) + helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PLUS_PREFIX) --wait --set nginxGateway.image.pullPolicy=Never --set nginx.service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway --set nginx.plus=true --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) $(HELM_PARAMETERS) .PHONY: check-for-plus-usage-endpoint check-for-plus-usage-endpoint: ## Checks that the PLUS_USAGE_ENDPOINT is set in the environment. This env var is required when deploying or testing with N+. diff --git a/apis/v1alpha1/nginxproxy_types.go b/apis/v1alpha1/nginxproxy_types.go deleted file mode 100644 index ed4ea9ed3d..0000000000 --- a/apis/v1alpha1/nginxproxy_types.go +++ /dev/null @@ -1,282 +0,0 @@ -package v1alpha1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +kubebuilder:object:root=true -// +kubebuilder:storageversion -// +kubebuilder:resource:categories=nginx-gateway-fabric,scope=Cluster -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` - -// NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. 
It provides a way -// to configure global settings for all Gateways defined from the GatewayClass. -type NginxProxy struct { //nolint:govet // standard field alignment, don't change it - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired state of the NginxProxy. - Spec NginxProxySpec `json:"spec"` -} - -// +kubebuilder:object:root=true - -// NginxProxyList contains a list of NginxProxies. -type NginxProxyList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NginxProxy `json:"items"` -} - -// NginxProxySpec defines the desired state of the NginxProxy. -type NginxProxySpec struct { - // IPFamily specifies the IP family to be used by the NGINX. - // Default is "dual", meaning the server will use both IPv4 and IPv6. - // - // +optional - // +kubebuilder:default:=dual - IPFamily *IPFamilyType `json:"ipFamily,omitempty"` - // Telemetry specifies the OpenTelemetry configuration. - // - // +optional - Telemetry *Telemetry `json:"telemetry,omitempty"` - // RewriteClientIP defines configuration for rewriting the client IP to the original client's IP. - // +kubebuilder:validation:XValidation:message="if mode is set, trustedAddresses is a required field",rule="!(has(self.mode) && (!has(self.trustedAddresses) || size(self.trustedAddresses) == 0))" - // - // +optional - //nolint:lll - RewriteClientIP *RewriteClientIP `json:"rewriteClientIP,omitempty"` - // Logging defines logging related settings for NGINX. - // - // +optional - Logging *NginxLogging `json:"logging,omitempty"` - // NginxPlus specifies NGINX Plus additional settings. - // - // +optional - NginxPlus *NginxPlus `json:"nginxPlus,omitempty"` - // DisableHTTP2 defines if http2 should be disabled for all servers. - // Default is false, meaning http2 will be enabled for all servers. - DisableHTTP2 bool `json:"disableHTTP2,omitempty"` -} - -// NginxPlus specifies NGINX Plus additional settings. 
These will only be applied if NGINX Plus is being used. -type NginxPlus struct { - // AllowedAddresses specifies IPAddresses or CIDR blocks to the allow list for accessing the NGINX Plus API. - // - // +optional - AllowedAddresses []NginxPlusAllowAddress `json:"allowedAddresses,omitempty"` -} - -// Telemetry specifies the OpenTelemetry configuration. -type Telemetry struct { - // Exporter specifies OpenTelemetry export parameters. - // - // +optional - Exporter *TelemetryExporter `json:"exporter,omitempty"` - - // ServiceName is the "service.name" attribute of the OpenTelemetry resource. - // Default is 'ngf::'. If a value is provided by the user, - // then the default becomes a prefix to that value. - // - // +optional - // +kubebuilder:validation:MaxLength=127 - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_-]+$` - ServiceName *string `json:"serviceName,omitempty"` - - // SpanAttributes are custom key/value attributes that are added to each span. - // - // +optional - // +listType=map - // +listMapKey=key - // +kubebuilder:validation:MaxItems=64 - SpanAttributes []SpanAttribute `json:"spanAttributes,omitempty"` -} - -// TelemetryExporter specifies OpenTelemetry export parameters. -type TelemetryExporter struct { - // Interval is the maximum interval between two exports. - // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter - // - // +optional - Interval *Duration `json:"interval,omitempty"` - - // BatchSize is the maximum number of spans to be sent in one batch per worker. - // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter - // - // +optional - // +kubebuilder:validation:Minimum=0 - BatchSize *int32 `json:"batchSize,omitempty"` - - // BatchCount is the number of pending batches per worker, spans exceeding the limit are dropped. 
- // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter - // - // +optional - // +kubebuilder:validation:Minimum=0 - BatchCount *int32 `json:"batchCount,omitempty"` - - // Endpoint is the address of OTLP/gRPC endpoint that will accept telemetry data. - // Format: alphanumeric hostname with optional http scheme and optional port. - // - //nolint:lll - // +kubebuilder:validation:Pattern=`^(?:http?:\/\/)?[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)*(?::\d{1,5})?$` - Endpoint string `json:"endpoint"` -} - -// RewriteClientIP specifies the configuration for rewriting the client's IP address. -type RewriteClientIP struct { - // Mode defines how NGINX will rewrite the client's IP address. - // There are two possible modes: - // - ProxyProtocol: NGINX will rewrite the client's IP using the PROXY protocol header. - // - XForwardedFor: NGINX will rewrite the client's IP using the X-Forwarded-For header. - // Sets NGINX directive real_ip_header: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header - // - // +optional - Mode *RewriteClientIPModeType `json:"mode,omitempty"` - - // SetIPRecursively configures whether recursive search is used when selecting the client's address from - // the X-Forwarded-For header. It is used in conjunction with TrustedAddresses. - // If enabled, NGINX will recurse on the values in X-Forwarded-Header from the end of array - // to start of array and select the first untrusted IP. - // For example, if X-Forwarded-For is [11.11.11.11, 22.22.22.22, 55.55.55.1], - // and TrustedAddresses is set to 55.55.55.1/32, NGINX will rewrite the client IP to 22.22.22.22. - // If disabled, NGINX will select the IP at the end of the array. - // In the previous example, 55.55.55.1 would be selected. 
- // Sets NGINX directive real_ip_recursive: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive - // - // +optional - SetIPRecursively *bool `json:"setIPRecursively,omitempty"` - - // TrustedAddresses specifies the addresses that are trusted to send correct client IP information. - // If a request comes from a trusted address, NGINX will rewrite the client IP information, - // and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. - // If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - // TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. - // To trust all addresses (not recommended for production), set to 0.0.0.0/0. - // If no addresses are provided, NGINX will not rewrite the client IP information. - // Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from - // This field is required if mode is set. - // - // +optional - // +listType=map - // +listMapKey=type - // +kubebuilder:validation:MaxItems=16 - TrustedAddresses []RewriteClientIPAddress `json:"trustedAddresses,omitempty"` -} - -// RewriteClientIPModeType defines how NGINX Gateway Fabric will determine the client's original IP address. -// +kubebuilder:validation:Enum=ProxyProtocol;XForwardedFor -type RewriteClientIPModeType string - -const ( - // RewriteClientIPModeProxyProtocol configures NGINX to accept PROXY protocol and - // set the client's IP address to the IP address in the PROXY protocol header. - // Sets the proxy_protocol parameter on the listen directive of all servers and sets real_ip_header - // to proxy_protocol: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header. - RewriteClientIPModeProxyProtocol RewriteClientIPModeType = "ProxyProtocol" - - // RewriteClientIPModeXForwardedFor configures NGINX to set the client's IP address to the - // IP address in the X-Forwarded-For HTTP header. 
- // https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header. - RewriteClientIPModeXForwardedFor RewriteClientIPModeType = "XForwardedFor" -) - -// IPFamilyType specifies the IP family to be used by NGINX. -// -// +kubebuilder:validation:Enum=dual;ipv4;ipv6 -type IPFamilyType string - -const ( - // Dual specifies that NGINX will use both IPv4 and IPv6. - Dual IPFamilyType = "dual" - // IPv4 specifies that NGINX will use only IPv4. - IPv4 IPFamilyType = "ipv4" - // IPv6 specifies that NGINX will use only IPv6. - IPv6 IPFamilyType = "ipv6" -) - -// RewriteClientIPAddress specifies the address type and value for a RewriteClientIP address. -type RewriteClientIPAddress struct { - // Type specifies the type of address. - Type RewriteClientIPAddressType `json:"type"` - - // Value specifies the address value. - Value string `json:"value"` -} - -// RewriteClientIPAddressType specifies the type of address. -// +kubebuilder:validation:Enum=CIDR;IPAddress;Hostname -type RewriteClientIPAddressType string - -const ( - // RewriteClientIPCIDRAddressType specifies that the address is a CIDR block. - RewriteClientIPCIDRAddressType RewriteClientIPAddressType = "CIDR" - - // RewriteClientIPIPAddressType specifies that the address is an IP address. - RewriteClientIPIPAddressType RewriteClientIPAddressType = "IPAddress" - - // RewriteClientIPHostnameAddressType specifies that the address is a Hostname. - RewriteClientIPHostnameAddressType RewriteClientIPAddressType = "Hostname" -) - -// NginxPlusAllowAddress specifies the address type and value for an NginxPlus allow address. -type NginxPlusAllowAddress struct { - // Type specifies the type of address. - Type NginxPlusAllowAddressType `json:"type"` - - // Value specifies the address value. - Value string `json:"value"` -} - -// NginxPlusAllowAddressType specifies the type of address. 
-// +kubebuilder:validation:Enum=CIDR;IPAddress -type NginxPlusAllowAddressType string - -const ( - // NginxPlusAllowCIDRAddressType specifies that the address is a CIDR block. - NginxPlusAllowCIDRAddressType NginxPlusAllowAddressType = "CIDR" - - // NginxPlusAllowIPAddressType specifies that the address is an IP address. - NginxPlusAllowIPAddressType NginxPlusAllowAddressType = "IPAddress" -) - -// NginxLogging defines logging related settings for NGINX. -type NginxLogging struct { - // ErrorLevel defines the error log level. Possible log levels listed in order of increasing severity are - // debug, info, notice, warn, error, crit, alert, and emerg. Setting a certain log level will cause all messages - // of the specified and more severe log levels to be logged. For example, the log level 'error' will cause error, - // crit, alert, and emerg messages to be logged. https://nginx.org/en/docs/ngx_core_module.html#error_log - // - // +optional - // +kubebuilder:default=info - ErrorLevel *NginxErrorLogLevel `json:"errorLevel,omitempty"` -} - -// NginxErrorLogLevel type defines the log level of error logs for NGINX. -// -// +kubebuilder:validation:Enum=debug;info;notice;warn;error;crit;alert;emerg -type NginxErrorLogLevel string - -const ( - // NginxLogLevelDebug is the debug level for NGINX error logs. - NginxLogLevelDebug NginxErrorLogLevel = "debug" - - // NginxLogLevelInfo is the info level for NGINX error logs. - NginxLogLevelInfo NginxErrorLogLevel = "info" - - // NginxLogLevelNotice is the notice level for NGINX error logs. - NginxLogLevelNotice NginxErrorLogLevel = "notice" - - // NginxLogLevelWarn is the warn level for NGINX error logs. - NginxLogLevelWarn NginxErrorLogLevel = "warn" - - // NginxLogLevelError is the error level for NGINX error logs. - NginxLogLevelError NginxErrorLogLevel = "error" - - // NginxLogLevelCrit is the crit level for NGINX error logs. 
- NginxLogLevelCrit NginxErrorLogLevel = "crit" - - // NginxLogLevelAlert is the alert level for NGINX error logs. - NginxLogLevelAlert NginxErrorLogLevel = "alert" - - // NginxLogLevelEmerg is the emerg level for NGINX error logs. - NginxLogLevelEmerg NginxErrorLogLevel = "emerg" -) diff --git a/apis/v1alpha1/register.go b/apis/v1alpha1/register.go index 0d18c29eaa..7deb5bfb5c 100644 --- a/apis/v1alpha1/register.go +++ b/apis/v1alpha1/register.go @@ -34,8 +34,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NginxGateway{}, &NginxGatewayList{}, - &NginxProxy{}, - &NginxProxyList{}, &ObservabilityPolicy{}, &ObservabilityPolicyList{}, &ClientSettingsPolicy{}, diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 96100bed3f..65b3b76c30 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -318,159 +318,6 @@ func (in *NginxGatewayStatus) DeepCopy() *NginxGatewayStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { - *out = *in - if in.ErrorLevel != nil { - in, out := &in.ErrorLevel, &out.ErrorLevel - *out = new(NginxErrorLogLevel) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxLogging. -func (in *NginxLogging) DeepCopy() *NginxLogging { - if in == nil { - return nil - } - out := new(NginxLogging) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NginxPlus) DeepCopyInto(out *NginxPlus) { - *out = *in - if in.AllowedAddresses != nil { - in, out := &in.AllowedAddresses, &out.AllowedAddresses - *out = make([]NginxPlusAllowAddress, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlus. -func (in *NginxPlus) DeepCopy() *NginxPlus { - if in == nil { - return nil - } - out := new(NginxPlus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxPlusAllowAddress) DeepCopyInto(out *NginxPlusAllowAddress) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlusAllowAddress. -func (in *NginxPlusAllowAddress) DeepCopy() *NginxPlusAllowAddress { - if in == nil { - return nil - } - out := new(NginxPlusAllowAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxProxy) DeepCopyInto(out *NginxProxy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxy. -func (in *NginxProxy) DeepCopy() *NginxProxy { - if in == nil { - return nil - } - out := new(NginxProxy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NginxProxy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NginxProxyList) DeepCopyInto(out *NginxProxyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NginxProxy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxyList. -func (in *NginxProxyList) DeepCopy() *NginxProxyList { - if in == nil { - return nil - } - out := new(NginxProxyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NginxProxyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { - *out = *in - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamilyType) - **out = **in - } - if in.Telemetry != nil { - in, out := &in.Telemetry, &out.Telemetry - *out = new(Telemetry) - (*in).DeepCopyInto(*out) - } - if in.RewriteClientIP != nil { - in, out := &in.RewriteClientIP, &out.RewriteClientIP - *out = new(RewriteClientIP) - (*in).DeepCopyInto(*out) - } - if in.Logging != nil { - in, out := &in.Logging, &out.Logging - *out = new(NginxLogging) - (*in).DeepCopyInto(*out) - } - if in.NginxPlus != nil { - in, out := &in.NginxPlus, &out.NginxPlus - *out = new(NginxPlus) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxySpec. 
-func (in *NginxProxySpec) DeepCopy() *NginxProxySpec { - if in == nil { - return nil - } - out := new(NginxProxySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObservabilityPolicy) DeepCopyInto(out *ObservabilityPolicy) { *out = *in @@ -557,51 +404,6 @@ func (in *ObservabilityPolicySpec) DeepCopy() *ObservabilityPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RewriteClientIP) DeepCopyInto(out *RewriteClientIP) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(RewriteClientIPModeType) - **out = **in - } - if in.SetIPRecursively != nil { - in, out := &in.SetIPRecursively, &out.SetIPRecursively - *out = new(bool) - **out = **in - } - if in.TrustedAddresses != nil { - in, out := &in.TrustedAddresses, &out.TrustedAddresses - *out = make([]RewriteClientIPAddress, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIP. -func (in *RewriteClientIP) DeepCopy() *RewriteClientIP { - if in == nil { - return nil - } - out := new(RewriteClientIP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RewriteClientIPAddress) DeepCopyInto(out *RewriteClientIPAddress) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIPAddress. -func (in *RewriteClientIPAddress) DeepCopy() *RewriteClientIPAddress { - if in == nil { - return nil - } - out := new(RewriteClientIPAddress) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Snippet) DeepCopyInto(out *Snippet) { *out = *in @@ -733,66 +535,6 @@ func (in *SpanAttribute) DeepCopy() *SpanAttribute { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Telemetry) DeepCopyInto(out *Telemetry) { - *out = *in - if in.Exporter != nil { - in, out := &in.Exporter, &out.Exporter - *out = new(TelemetryExporter) - (*in).DeepCopyInto(*out) - } - if in.ServiceName != nil { - in, out := &in.ServiceName, &out.ServiceName - *out = new(string) - **out = **in - } - if in.SpanAttributes != nil { - in, out := &in.SpanAttributes, &out.SpanAttributes - *out = make([]SpanAttribute, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Telemetry. -func (in *Telemetry) DeepCopy() *Telemetry { - if in == nil { - return nil - } - out := new(Telemetry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TelemetryExporter) DeepCopyInto(out *TelemetryExporter) { - *out = *in - if in.Interval != nil { - in, out := &in.Interval, &out.Interval - *out = new(Duration) - **out = **in - } - if in.BatchSize != nil { - in, out := &in.BatchSize, &out.BatchSize - *out = new(int32) - **out = **in - } - if in.BatchCount != nil { - in, out := &in.BatchCount, &out.BatchCount - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryExporter. -func (in *TelemetryExporter) DeepCopy() *TelemetryExporter { - if in == nil { - return nil - } - out := new(TelemetryExporter) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Tracing) DeepCopyInto(out *Tracing) { *out = *in diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go new file mode 100644 index 0000000000..99daf4cfa9 --- /dev/null +++ b/apis/v1alpha2/nginxproxy_types.go @@ -0,0 +1,588 @@ +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=nginx-gateway-fabric,scope=Namespaced +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef +// or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. +// If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. +// If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass +// reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those +// set on the GatewayClass NginxProxy. +type NginxProxy struct { //nolint:govet // standard field alignment, don't change it + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the NginxProxy. + Spec NginxProxySpec `json:"spec"` +} + +// +kubebuilder:object:root=true + +// NginxProxyList contains a list of NginxProxies. +type NginxProxyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NginxProxy `json:"items"` +} + +// NginxProxySpec defines the desired state of the NginxProxy. +type NginxProxySpec struct { + // IPFamily specifies the IP family to be used by the NGINX. + // Default is "dual", meaning the server will use both IPv4 and IPv6. 
+ // + // +optional + // +kubebuilder:default:=dual + IPFamily *IPFamilyType `json:"ipFamily,omitempty"` + // Telemetry specifies the OpenTelemetry configuration. + // + // +optional + Telemetry *Telemetry `json:"telemetry,omitempty"` + // Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + // re-roll of the NGINX deployment. + // + // +optional + Metrics *Metrics `json:"metrics,omitempty"` + // RewriteClientIP defines configuration for rewriting the client IP to the original client's IP. + // +kubebuilder:validation:XValidation:message="if mode is set, trustedAddresses is a required field",rule="!(has(self.mode) && (!has(self.trustedAddresses) || size(self.trustedAddresses) == 0))" + // + // +optional + //nolint:lll + RewriteClientIP *RewriteClientIP `json:"rewriteClientIP,omitempty"` + // Logging defines logging related settings for NGINX. + // + // +optional + Logging *NginxLogging `json:"logging,omitempty"` + // NginxPlus specifies NGINX Plus additional settings. + // + // +optional + NginxPlus *NginxPlus `json:"nginxPlus,omitempty"` + // DisableHTTP2 defines if http2 should be disabled for all servers. + // If not specified, or set to false, http2 will be enabled for all servers. + // + // +optional + DisableHTTP2 *bool `json:"disableHTTP2,omitempty"` + // Kubernetes contains the configuration for the NGINX Deployment and Service Kubernetes objects. + // + // +optional + Kubernetes *KubernetesSpec `json:"kubernetes,omitempty"` +} + +// Telemetry specifies the OpenTelemetry configuration. +type Telemetry struct { + // DisabledFeatures specifies OpenTelemetry features to be disabled. + // + // +optional + DisabledFeatures []DisableTelemetryFeature `json:"disabledFeatures,omitempty"` + + // Exporter specifies OpenTelemetry export parameters. + // + // +optional + Exporter *TelemetryExporter `json:"exporter,omitempty"` + + // ServiceName is the "service.name" attribute of the OpenTelemetry resource. 
+ // Default is 'ngf::'. If a value is provided by the user, + // then the default becomes a prefix to that value. + // + // +optional + // +kubebuilder:validation:MaxLength=127 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_-]+$` + ServiceName *string `json:"serviceName,omitempty"` + + // SpanAttributes are custom key/value attributes that are added to each span. + // + // +optional + // +listType=map + // +listMapKey=key + // +kubebuilder:validation:MaxItems=64 + SpanAttributes []v1alpha1.SpanAttribute `json:"spanAttributes,omitempty"` +} + +// DisableTelemetryFeature is a telemetry feature that can be disabled. +// +// +kubebuilder:validation:Enum=DisableTracing +type DisableTelemetryFeature string + +const ( + // DisableTracing disables the OpenTelemetry tracing feature. + DisableTracing DisableTelemetryFeature = "DisableTracing" +) + +// TelemetryExporter specifies OpenTelemetry export parameters. +type TelemetryExporter struct { + // Interval is the maximum interval between two exports. + // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter + // + // +optional + Interval *v1alpha1.Duration `json:"interval,omitempty"` + + // BatchSize is the maximum number of spans to be sent in one batch per worker. + // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter + // + // +optional + // +kubebuilder:validation:Minimum=0 + BatchSize *int32 `json:"batchSize,omitempty"` + + // BatchCount is the number of pending batches per worker, spans exceeding the limit are dropped. + // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter + // + // +optional + // +kubebuilder:validation:Minimum=0 + BatchCount *int32 `json:"batchCount,omitempty"` + + // Endpoint is the address of OTLP/gRPC endpoint that will accept telemetry data. + // Format: alphanumeric hostname with optional http scheme and optional port. 
+ // + //nolint:lll + // +optional + // +kubebuilder:validation:Pattern=`^(?:http:\/\/)?[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)*(?::\d{1,5})?$` + Endpoint *string `json:"endpoint,omitempty"` +} + +// Metrics defines the configuration for Prometheus scraping metrics. +type Metrics struct { + // Port where the Prometheus metrics are exposed. + // + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *int32 `json:"port,omitempty"` + + // Disable serving Prometheus metrics on the listen port. + // + // +optional + Disable *bool `json:"disable,omitempty"` +} + +// RewriteClientIP specifies the configuration for rewriting the client's IP address. +type RewriteClientIP struct { + // Mode defines how NGINX will rewrite the client's IP address. + // There are two possible modes: + // - ProxyProtocol: NGINX will rewrite the client's IP using the PROXY protocol header. + // - XForwardedFor: NGINX will rewrite the client's IP using the X-Forwarded-For header. + // Sets NGINX directive real_ip_header: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header + // + // +optional + Mode *RewriteClientIPModeType `json:"mode,omitempty"` + + // SetIPRecursively configures whether recursive search is used when selecting the client's address from + // the X-Forwarded-For header. It is used in conjunction with TrustedAddresses. + // If enabled, NGINX will recurse on the values in the X-Forwarded-For header from the end of array + // to start of array and select the first untrusted IP. + // For example, if X-Forwarded-For is [11.11.11.11, 22.22.22.22, 55.55.55.1], + // and TrustedAddresses is set to 55.55.55.1/32, NGINX will rewrite the client IP to 22.22.22.22. + // If disabled, NGINX will select the IP at the end of the array. + // In the previous example, 55.55.55.1 would be selected. 
+ // Sets NGINX directive real_ip_recursive: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive + // + // +optional + SetIPRecursively *bool `json:"setIPRecursively,omitempty"` + + // TrustedAddresses specifies the addresses that are trusted to send correct client IP information. + // If a request comes from a trusted address, NGINX will rewrite the client IP information, + // and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. + // If the request does not come from a trusted address, NGINX will not rewrite the client IP information. + // To trust all addresses (not recommended for production), set to 0.0.0.0/0. + // If no addresses are provided, NGINX will not rewrite the client IP information. + // Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from + // This field is required if mode is set. + // + // +optional + // +kubebuilder:validation:MaxItems=16 + TrustedAddresses []RewriteClientIPAddress `json:"trustedAddresses,omitempty"` +} + +// RewriteClientIPModeType defines how NGINX Gateway Fabric will determine the client's original IP address. +// +kubebuilder:validation:Enum=ProxyProtocol;XForwardedFor +type RewriteClientIPModeType string + +const ( + // RewriteClientIPModeProxyProtocol configures NGINX to accept PROXY protocol and + // set the client's IP address to the IP address in the PROXY protocol header. + // Sets the proxy_protocol parameter on the listen directive of all servers and sets real_ip_header + // to proxy_protocol: https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header. + RewriteClientIPModeProxyProtocol RewriteClientIPModeType = "ProxyProtocol" + + // RewriteClientIPModeXForwardedFor configures NGINX to set the client's IP address to the + // IP address in the X-Forwarded-For HTTP header. + // https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header. 
+ RewriteClientIPModeXForwardedFor RewriteClientIPModeType = "XForwardedFor" +) + +// IPFamilyType specifies the IP family to be used by NGINX. +// +// +kubebuilder:validation:Enum=dual;ipv4;ipv6 +type IPFamilyType string + +const ( + // Dual specifies that NGINX will use both IPv4 and IPv6. + Dual IPFamilyType = "dual" + // IPv4 specifies that NGINX will use only IPv4. + IPv4 IPFamilyType = "ipv4" + // IPv6 specifies that NGINX will use only IPv6. + IPv6 IPFamilyType = "ipv6" +) + +// RewriteClientIPAddress specifies the address type and value for a RewriteClientIP address. +type RewriteClientIPAddress struct { + // Type specifies the type of address. + Type RewriteClientIPAddressType `json:"type"` + + // Value specifies the address value. + Value string `json:"value"` +} + +// RewriteClientIPAddressType specifies the type of address. +// +kubebuilder:validation:Enum=CIDR;IPAddress;Hostname +type RewriteClientIPAddressType string + +const ( + // RewriteClientIPCIDRAddressType specifies that the address is a CIDR block. + RewriteClientIPCIDRAddressType RewriteClientIPAddressType = "CIDR" + + // RewriteClientIPIPAddressType specifies that the address is an IP address. + RewriteClientIPIPAddressType RewriteClientIPAddressType = "IPAddress" + + // RewriteClientIPHostnameAddressType specifies that the address is a Hostname. + RewriteClientIPHostnameAddressType RewriteClientIPAddressType = "Hostname" +) + +// NginxLogging defines logging related settings for NGINX. +type NginxLogging struct { + // ErrorLevel defines the error log level. Possible log levels listed in order of increasing severity are + // debug, info, notice, warn, error, crit, alert, and emerg. Setting a certain log level will cause all messages + // of the specified and more severe log levels to be logged. For example, the log level 'error' will cause error, + // crit, alert, and emerg messages to be logged. 
https://nginx.org/en/docs/ngx_core_module.html#error_log + // + // +optional + // +kubebuilder:default=info + ErrorLevel *NginxErrorLogLevel `json:"errorLevel,omitempty"` + + // AgentLevel defines the log level of the NGINX agent process. Changing this value results in a + // re-roll of the NGINX deployment. + // + // +optional + // +kubebuilder:default=info + AgentLevel *AgentLogLevel `json:"agentLevel,omitempty"` +} + +// NginxErrorLogLevel type defines the log level of error logs for NGINX. +// +// +kubebuilder:validation:Enum=debug;info;notice;warn;error;crit;alert;emerg +type NginxErrorLogLevel string + +const ( + // NginxLogLevelDebug is the debug level for NGINX error logs. + NginxLogLevelDebug NginxErrorLogLevel = "debug" + + // NginxLogLevelInfo is the info level for NGINX error logs. + NginxLogLevelInfo NginxErrorLogLevel = "info" + + // NginxLogLevelNotice is the notice level for NGINX error logs. + NginxLogLevelNotice NginxErrorLogLevel = "notice" + + // NginxLogLevelWarn is the warn level for NGINX error logs. + NginxLogLevelWarn NginxErrorLogLevel = "warn" + + // NginxLogLevelError is the error level for NGINX error logs. + NginxLogLevelError NginxErrorLogLevel = "error" + + // NginxLogLevelCrit is the crit level for NGINX error logs. + NginxLogLevelCrit NginxErrorLogLevel = "crit" + + // NginxLogLevelAlert is the alert level for NGINX error logs. + NginxLogLevelAlert NginxErrorLogLevel = "alert" + + // NginxLogLevelEmerg is the emerg level for NGINX error logs. + NginxLogLevelEmerg NginxErrorLogLevel = "emerg" +) + +// AgentLevel defines the log level of the NGINX agent process. +// +// +kubebuilder:validation:Enum=debug;info;error;panic;fatal +type AgentLogLevel string + +const ( + // AgentLogLevelDebug is the debug level NGINX agent logs. + AgentLogLevelDebug AgentLogLevel = "debug" + + // AgentLogLevelInfo is the info level NGINX agent logs. + AgentLogLevelInfo AgentLogLevel = "info" + + // AgentLogLevelError is the error level NGINX agent logs. 
+ AgentLogLevelError AgentLogLevel = "error" + + // AgentLogLevelPanic is the panic level NGINX agent logs. + AgentLogLevelPanic AgentLogLevel = "panic" + + // AgentLogLevelFatal is the fatal level NGINX agent logs. + AgentLogLevelFatal AgentLogLevel = "fatal" +) + +// NginxPlus specifies NGINX Plus additional settings. These will only be applied if NGINX Plus is being used. +type NginxPlus struct { + // AllowedAddresses specifies IPAddresses or CIDR blocks to the allow list for accessing the NGINX Plus API. + // + // +optional + AllowedAddresses []NginxPlusAllowAddress `json:"allowedAddresses,omitempty"` +} + +// NginxPlusAllowAddress specifies the address type and value for an NginxPlus allow address. +type NginxPlusAllowAddress struct { + // Type specifies the type of address. + Type NginxPlusAllowAddressType `json:"type"` + + // Value specifies the address value. + Value string `json:"value"` +} + +// NginxPlusAllowAddressType specifies the type of address. +// +kubebuilder:validation:Enum=CIDR;IPAddress +type NginxPlusAllowAddressType string + +const ( + // NginxPlusAllowCIDRAddressType specifies that the address is a CIDR block. + NginxPlusAllowCIDRAddressType NginxPlusAllowAddressType = "CIDR" + + // NginxPlusAllowIPAddressType specifies that the address is an IP address. + NginxPlusAllowIPAddressType NginxPlusAllowAddressType = "IPAddress" +) + +// KubernetesSpec contains the configuration for the NGINX Deployment and Service Kubernetes objects. +type KubernetesSpec struct { + // Deployment is the configuration for the NGINX Deployment. + // This is the default deployment option. + // + // +optional + Deployment *DeploymentSpec `json:"deployment,omitempty"` + + // Service is the configuration for the NGINX Service. + // + // +optional + Service *ServiceSpec `json:"service,omitempty"` +} + +// Deployment is the configuration for the NGINX Deployment. +type DeploymentSpec struct { + // Number of desired Pods. 
+ // + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Pod defines Pod-specific fields. + // + // +optional + Pod PodSpec `json:"pod,omitempty"` + + // Container defines container fields for the NGINX container. + // + // +optional + Container ContainerSpec `json:"container,omitempty"` +} + +// PodSpec defines Pod-specific fields. +type PodSpec struct { + // TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // Affinity is the pod's scheduling constraints. + // + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations allow the scheduler to schedule Pods with matching taints. + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Volumes represents named volumes in a pod that may be accessed by any container in the pod. + // + // +optional + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // TopologySpreadConstraints describes how a group of Pods ought to spread across topology + // domains. 
Scheduler will schedule Pods in a way which abides by the constraints. + // All topologySpreadConstraints are ANDed. + // + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// ContainerSpec defines container fields for the NGINX container. +type ContainerSpec struct { + // Debug enables debugging for NGINX by using the nginx-debug binary. + // + // +optional + Debug *bool `json:"debug,omitempty"` + + // Image is the NGINX image to use. + // + // +optional + Image *Image `json:"image,omitempty"` + + // Resources describes the compute resource requirements. + // + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // Lifecycle describes actions that the management system should take in response to container lifecycle + // events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + // until the action is complete, unless the container process fails, in which case the handler is aborted. + // + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"` + + // VolumeMounts describe the mounting of Volumes within a container. + // + // +optional + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` +} + +// Image is the NGINX image to use. +type Image struct { + // Repository is the image path. + // Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + // + // +optional + Repository *string `json:"repository,omitempty"` + // Tag is the image tag to use. Default matches the tag of the control plane. + // + // +optional + Tag *string `json:"tag,omitempty"` + // PullPolicy describes a policy for if/when to pull a container image. + // + // +optional + // +kubebuilder:default:=IfNotPresent + PullPolicy *PullPolicy `json:"pullPolicy,omitempty"` +} + +// PullPolicy describes a policy for if/when to pull a container image. 
+// +kubebuilder:validation:Enum=Always;Never;IfNotPresent +type PullPolicy corev1.PullPolicy + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails. + PullAlways PullPolicy = PullPolicy(corev1.PullAlways) + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the + // image isn't present. + PullNever PullPolicy = PullPolicy(corev1.PullNever) + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image + // isn't present and the pull fails. + PullIfNotPresent PullPolicy = PullPolicy(corev1.PullIfNotPresent) +) + +// ServiceSpec is the configuration for the NGINX Service. +type ServiceSpec struct { + // ServiceType describes ingress method for the Service. + // + // +optional + // +kubebuilder:default:=LoadBalancer + ServiceType *ServiceType `json:"type,omitempty"` + + // ExternalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + // and LoadBalancer IPs. + // + // +optional + // +kubebuilder:default:=Local + ExternalTrafficPolicy *ExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` + + // LoadBalancerIP is a static IP address for the load balancer. Requires service type to be LoadBalancer. + // + // +optional + LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + + // LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + // Requires service type to be LoadBalancer. + // + // +optional + LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` + + // LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + // Requires service type to be LoadBalancer. 
+ // + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` + + // NodePorts are the list of NodePorts to expose on the NGINX data plane service. + // Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + // The default NodePort range enforced by Kubernetes is 30000-32767. + // + // +optional + NodePorts []NodePort `json:"nodePorts,omitempty"` +} + +// ServiceType describes ingress method for the Service. +// +kubebuilder:validation:Enum=ClusterIP;LoadBalancer;NodePort +type ServiceType corev1.ServiceType + +const ( + // ServiceTypeClusterIP means a Service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = ServiceType(corev1.ServiceTypeClusterIP) + + // ServiceTypeNodePort means a Service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = ServiceType(corev1.ServiceTypeNodePort) + + // ServiceTypeLoadBalancer means a Service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = ServiceType(corev1.ServiceTypeLoadBalancer) +) + +// ExternalTrafficPolicy describes how nodes distribute service traffic they +// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, +// and LoadBalancer IPs. Ignored for ClusterIP services. +// +kubebuilder:validation:Enum=Cluster;Local +type ExternalTrafficPolicy corev1.ServiceExternalTrafficPolicy + +const ( + // ExternalTrafficPolicyCluster routes traffic to all endpoints. + ExternalTrafficPolicyCluster ExternalTrafficPolicy = ExternalTrafficPolicy(corev1.ServiceExternalTrafficPolicyCluster) + + // ExternalTrafficPolicyLocal preserves the source IP of the traffic by + // routing only to endpoints on the same node as the traffic was received on + // (dropping the traffic if there are no local endpoints). 
+ ExternalTrafficPolicyLocal ExternalTrafficPolicy = ExternalTrafficPolicy(corev1.ServiceExternalTrafficPolicyLocal) +) + +// NodePort creates a port on each node on which the NGINX data plane service is exposed. The NodePort MUST +// map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort +// automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. +type NodePort struct { + // Port is the NodePort to expose. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port"` + + // ListenerPort is the Gateway listener port that this NodePort maps to. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + ListenerPort int32 `json:"listenerPort"` +} diff --git a/apis/v1alpha2/register.go b/apis/v1alpha2/register.go index 23601e280e..8615a5a3b0 100644 --- a/apis/v1alpha2/register.go +++ b/apis/v1alpha2/register.go @@ -32,11 +32,12 @@ var ( // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &NginxProxy{}, + &NginxProxyList{}, &ObservabilityPolicy{}, &ObservabilityPolicyList{}, ) // AddToGroupVersion allows the serialization of client types like ListOptions. metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil } diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index 77cf20bb07..c0ddf4ed6e 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -6,10 +6,343 @@ package v1alpha2 import ( "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2" ) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
+func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { + *out = *in + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(bool) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSpec. +func (in *ContainerSpec) DeepCopy() *ContainerSpec { + if in == nil { + return nil + } + out := new(ContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Pod.DeepCopyInto(&out.Pod) + in.Container.DeepCopyInto(&out.Container) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSpec) DeepCopyInto(out *KubernetesSpec) { + *out = *in + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(DeploymentSpec) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSpec. +func (in *KubernetesSpec) DeepCopy() *KubernetesSpec { + if in == nil { + return nil + } + out := new(KubernetesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metrics) DeepCopyInto(out *Metrics) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.Disable != nil { + in, out := &in.Disable, &out.Disable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics. 
+func (in *Metrics) DeepCopy() *Metrics { + if in == nil { + return nil + } + out := new(Metrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { + *out = *in + if in.ErrorLevel != nil { + in, out := &in.ErrorLevel, &out.ErrorLevel + *out = new(NginxErrorLogLevel) + **out = **in + } + if in.AgentLevel != nil { + in, out := &in.AgentLevel, &out.AgentLevel + *out = new(AgentLogLevel) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxLogging. +func (in *NginxLogging) DeepCopy() *NginxLogging { + if in == nil { + return nil + } + out := new(NginxLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxPlus) DeepCopyInto(out *NginxPlus) { + *out = *in + if in.AllowedAddresses != nil { + in, out := &in.AllowedAddresses, &out.AllowedAddresses + *out = make([]NginxPlusAllowAddress, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlus. +func (in *NginxPlus) DeepCopy() *NginxPlus { + if in == nil { + return nil + } + out := new(NginxPlus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxPlusAllowAddress) DeepCopyInto(out *NginxPlusAllowAddress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlusAllowAddress. 
+func (in *NginxPlusAllowAddress) DeepCopy() *NginxPlusAllowAddress { + if in == nil { + return nil + } + out := new(NginxPlusAllowAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxProxy) DeepCopyInto(out *NginxProxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxy. +func (in *NginxProxy) DeepCopy() *NginxProxy { + if in == nil { + return nil + } + out := new(NginxProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NginxProxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxProxyList) DeepCopyInto(out *NginxProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NginxProxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxyList. +func (in *NginxProxyList) DeepCopy() *NginxProxyList { + if in == nil { + return nil + } + out := new(NginxProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NginxProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { + *out = *in + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamilyType) + **out = **in + } + if in.Telemetry != nil { + in, out := &in.Telemetry, &out.Telemetry + *out = new(Telemetry) + (*in).DeepCopyInto(*out) + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(Metrics) + (*in).DeepCopyInto(*out) + } + if in.RewriteClientIP != nil { + in, out := &in.RewriteClientIP, &out.RewriteClientIP + *out = new(RewriteClientIP) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(NginxLogging) + (*in).DeepCopyInto(*out) + } + if in.NginxPlus != nil { + in, out := &in.NginxPlus, &out.NginxPlus + *out = new(NginxPlus) + (*in).DeepCopyInto(*out) + } + if in.DisableHTTP2 != nil { + in, out := &in.DisableHTTP2, &out.DisableHTTP2 + *out = new(bool) + **out = **in + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxySpec. +func (in *NginxProxySpec) DeepCopy() *NginxProxySpec { + if in == nil { + return nil + } + out := new(NginxProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePort) DeepCopyInto(out *NodePort) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePort. +func (in *NodePort) DeepCopy() *NodePort { + if in == nil { + return nil + } + out := new(NodePort) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ObservabilityPolicy) DeepCopyInto(out *ObservabilityPolicy) { *out = *in @@ -96,6 +429,219 @@ func (in *ObservabilityPolicySpec) DeepCopy() *ObservabilityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. +func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RewriteClientIP) DeepCopyInto(out *RewriteClientIP) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(RewriteClientIPModeType) + **out = **in + } + if in.SetIPRecursively != nil { + in, out := &in.SetIPRecursively, &out.SetIPRecursively + *out = new(bool) + **out = **in + } + if in.TrustedAddresses != nil { + in, out := &in.TrustedAddresses, &out.TrustedAddresses + *out = make([]RewriteClientIPAddress, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIP. +func (in *RewriteClientIP) DeepCopy() *RewriteClientIP { + if in == nil { + return nil + } + out := new(RewriteClientIP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteClientIPAddress) DeepCopyInto(out *RewriteClientIPAddress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIPAddress. +func (in *RewriteClientIPAddress) DeepCopy() *RewriteClientIPAddress { + if in == nil { + return nil + } + out := new(RewriteClientIPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(ServiceType) + **out = **in + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(ExternalTrafficPolicy) + **out = **in + } + if in.LoadBalancerIP != nil { + in, out := &in.LoadBalancerIP, &out.LoadBalancerIP + *out = new(string) + **out = **in + } + if in.LoadBalancerClass != nil { + in, out := &in.LoadBalancerClass, &out.LoadBalancerClass + *out = new(string) + **out = **in + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodePorts != nil { + in, out := &in.NodePorts, &out.NodePorts + *out = make([]NodePort, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Telemetry) DeepCopyInto(out *Telemetry) { + *out = *in + if in.DisabledFeatures != nil { + in, out := &in.DisabledFeatures, &out.DisabledFeatures + *out = make([]DisableTelemetryFeature, len(*in)) + copy(*out, *in) + } + if in.Exporter != nil { + in, out := &in.Exporter, &out.Exporter + *out = new(TelemetryExporter) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SpanAttributes != nil { + in, out := &in.SpanAttributes, &out.SpanAttributes + *out = make([]v1alpha1.SpanAttribute, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Telemetry. +func (in *Telemetry) DeepCopy() *Telemetry { + if in == nil { + return nil + } + out := new(Telemetry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelemetryExporter) DeepCopyInto(out *TelemetryExporter) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(v1alpha1.Duration) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int32) + **out = **in + } + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(int32) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryExporter. +func (in *TelemetryExporter) DeepCopy() *TelemetryExporter { + if in == nil { + return nil + } + out := new(TelemetryExporter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Tracing) DeepCopyInto(out *Tracing) { *out = *in diff --git a/build/Dockerfile b/build/Dockerfile index d00d848668..7495a0b71e 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -11,36 +11,20 @@ RUN make build FROM golang:1.24 AS ca-certs-provider -FROM alpine:3.21 AS capabilizer -RUN apk add --no-cache libcap - -FROM capabilizer AS local-capabilizer -COPY ./build/out/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer AS container-capabilizer -COPY --from=builder /go/src/github.com/nginx/nginx-gateway-fabric/build/out/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer AS goreleaser-capabilizer -ARG TARGETARCH -COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - FROM scratch AS common -# CA certs are needed for telemetry report and NGINX Plus usage report features, so that -# NGF can verify the server's certificate. +# CA certs are needed for telemetry report so that NGF can verify the server's certificate. 
COPY --from=ca-certs-provider --link /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -USER 102:1001 +USER 101:1001 ARG BUILD_AGENT ENV BUILD_AGENT=${BUILD_AGENT} ENTRYPOINT [ "/usr/bin/gateway" ] FROM common AS container -COPY --from=container-capabilizer /usr/bin/gateway /usr/bin/ +COPY --from=builder /go/src/github.com/nginxinc/nginx-gateway-fabric/build/out/gateway /usr/bin/ FROM common AS local -COPY --from=local-capabilizer /usr/bin/gateway /usr/bin/ +COPY ./build/out/gateway /usr/bin/ FROM common AS goreleaser -COPY --from=goreleaser-capabilizer /usr/bin/gateway /usr/bin/ +ARG TARGETARCH +COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ diff --git a/build/Dockerfile.nginx b/build/Dockerfile.nginx index 182c172245..3d5dc24241 100644 --- a/build/Dockerfile.nginx +++ b/build/Dockerfile.nginx @@ -1,18 +1,41 @@ # syntax=docker/dockerfile:1.15 +# TODO(sberman): the commented out lines are for when we use the published agent release +# FROM scratch AS nginx-files + +# # the following links can be replaced with local files if needed, i.e. 
ADD --chown=101:1001 +# ADD --link --chown=101:1001 https://cs.nginx.com/static/keys/nginx_signing.rsa.pub nginx_signing.rsa.pub + +FROM golang:alpine AS builder + +WORKDIR /tmp + +RUN apk add --no-cache git make \ + && git clone https://github.com/nginx/agent.git \ + && cd agent \ + && git checkout e745a3236e0f02a579461a5a435b3bcd410a686c \ + && make build + FROM nginx:1.28.0-alpine-otel ARG NJS_DIR ARG NGINX_CONF_DIR ARG BUILD_AGENT -RUN apk add --no-cache libcap \ +# RUN --mount=type=bind,from=nginx-files,src=nginx_signing.rsa.pub,target=/etc/apk/keys/nginx_signing.rsa.pub \ +# printf "%s\n" "http://packages.nginx.org/nginx-agent/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" >> /etc/apk/repositories \ +# && apk add --no-cache nginx-agent + +RUN apk add --no-cache libcap bash \ && mkdir -p /usr/lib/nginx/modules \ - && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ - && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ + && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ + && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx-debug \ && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx-debug \ && apk del libcap +COPY --from=builder /tmp/agent/build/nginx-agent /usr/bin/nginx-agent + +COPY build/entrypoint.sh /agent/entrypoint.sh COPY ${NJS_DIR}/httpmatches.js /usr/lib/nginx/modules/njs/httpmatches.js COPY ${NGINX_CONF_DIR}/nginx.conf /etc/nginx/nginx.conf COPY ${NGINX_CONF_DIR}/grpc-error-locations.conf /etc/nginx/grpc-error-locations.conf @@ -24,4 +47,4 @@ LABEL org.nginx.ngf.image.build.agent="${BUILD_AGENT}" USER 101:1001 -CMD ["sh", "-c", "rm -rf /var/run/nginx/*.sock && nginx -g 'daemon off;'"] +ENTRYPOINT ["/agent/entrypoint.sh"] diff --git a/build/Dockerfile.nginxplus b/build/Dockerfile.nginxplus index caf7d8297f..2c7d7452aa 100644 --- a/build/Dockerfile.nginxplus +++ b/build/Dockerfile.nginxplus @@ -4,6 +4,15 @@ FROM scratch AS nginx-files # the following links can 
be replaced with local files if needed, i.e. ADD --chown=101:1001 ADD --link --chown=101:1001 https://cs.nginx.com/static/keys/nginx_signing.rsa.pub nginx_signing.rsa.pub +FROM golang:alpine AS builder + +WORKDIR /tmp + +RUN apk add --no-cache git make \ + && git clone https://github.com/nginx/agent.git \ + && cd agent \ + && git checkout e745a3236e0f02a579461a5a435b3bcd410a686c \ + && make build FROM alpine:3.21 @@ -18,7 +27,7 @@ RUN --mount=type=secret,id=nginx-repo.crt,dst=/etc/apk/cert.pem,mode=0644 \ addgroup -g 1001 -S nginx \ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \ && printf "%s\n" "https://pkgs.nginx.com/plus/${NGINX_PLUS_VERSION}/alpine/v$(grep -E -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" >> /etc/apk/repositories \ - && apk add --no-cache nginx-plus nginx-plus-module-njs nginx-plus-module-otel libcap \ + && apk add --no-cache nginx-plus nginx-plus-module-njs nginx-plus-module-otel libcap bash \ && mkdir -p /usr/lib/nginx/modules \ && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ @@ -29,6 +38,9 @@ RUN --mount=type=secret,id=nginx-repo.crt,dst=/etc/apk/cert.pem,mode=0644 \ && ln -sf /dev/stdout /var/log/nginx/access.log \ && ln -sf /dev/stderr /var/log/nginx/error.log +COPY --from=builder /tmp/agent/build/nginx-agent /usr/bin/nginx-agent + +COPY build/entrypoint.sh /agent/entrypoint.sh COPY ${NJS_DIR}/httpmatches.js /usr/lib/nginx/modules/njs/httpmatches.js COPY ${NGINX_CONF_DIR}/nginx-plus.conf /etc/nginx/nginx.conf COPY ${NGINX_CONF_DIR}/grpc-error-locations.conf /etc/nginx/grpc-error-locations.conf @@ -40,4 +52,4 @@ USER 101:1001 LABEL org.nginx.ngf.image.build.agent="${BUILD_AGENT}" -CMD ["sh", "-c", "rm -rf /var/run/nginx/*.sock && nginx -g 'daemon off;'"] +ENTRYPOINT ["/agent/entrypoint.sh"] diff --git a/build/entrypoint.sh b/build/entrypoint.sh new file mode 100755 index 0000000000..9e9552b338 --- /dev/null +++ b/build/entrypoint.sh 
@@ -0,0 +1,72 @@ +#!/bin/bash + +set -euxo pipefail + +handle_term() { + echo "received TERM signal" + echo "stopping nginx-agent ..." + kill -TERM "${agent_pid}" 2>/dev/null + wait -n ${agent_pid} + echo "stopping nginx ..." + kill -TERM "${nginx_pid}" 2>/dev/null + wait -n ${nginx_pid} +} + +handle_quit() { + echo "received QUIT signal" + echo "stopping nginx-agent ..." + kill -QUIT "${agent_pid}" 2>/dev/null + wait -n ${agent_pid} + echo "stopping nginx ..." + kill -QUIT "${nginx_pid}" 2>/dev/null + wait -n ${nginx_pid} +} + +trap 'handle_term' TERM +trap 'handle_quit' QUIT + +rm -rf /var/run/nginx/*.sock + +# Launch nginx +echo "starting nginx ..." + +# if we want to use the nginx-debug binary, we will call this script with an argument "debug" +if [ "${1:-false}" = "debug" ]; then + /usr/sbin/nginx-debug -g "daemon off;" & +else + /usr/sbin/nginx -g "daemon off;" & +fi + +nginx_pid=$! + +SECONDS=0 + +while ! ps -ef | grep "nginx: master process" | grep -v grep; do + if ((SECONDS > 5)); then + echo "couldn't find nginx master process" + exit 1 + fi +done + +# start nginx-agent, pass args +echo "starting nginx-agent ..." +nginx-agent & + +agent_pid=$! + +if [ $? != 0 ]; then + echo "couldn't start the agent, please check the log file" + exit 1 +fi + +wait_term() { + wait ${agent_pid} + trap - TERM + kill -QUIT "${nginx_pid}" 2>/dev/null + echo "waiting for nginx to stop..." + wait ${nginx_pid} +} + +wait_term + +echo "nginx-agent process has stopped, exiting." diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 3dce9a4ac7..7904cbaacc 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -112,13 +112,7 @@ By default, the NGINX Gateway Fabric helm chart deploys a LoadBalancer Service. 
To use a NodePort Service instead: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.type=NodePort -``` - -To disable the creation of a Service: - -```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.create=false +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` ## Upgrading the Chart @@ -253,66 +247,80 @@ kubectl kustomize https://github.com/nginx/nginx-gateway-fabric/config/crd/gatew The following table lists the configurable parameters of the NGINX Gateway Fabric chart and their default values. +> More granular configuration options may not show up in this table. +> Viewing the `values.yaml` file directly can show all available options. + | Key | Description | Type | Default | |-----|-------------|------|---------| -| `affinity` | The affinity of the NGINX Gateway Fabric pod. | object | `{}` | -| `extraVolumes` | extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. | list | `[]` | -| `metrics.enable` | Enable exposing metrics in the Prometheus format. | bool | `true` | -| `metrics.port` | Set the port where the Prometheus metrics are exposed. | int | `9113` | -| `metrics.secure` | Enable serving metrics via https. By default metrics are served via http. Please note that this endpoint will be secured with a self-signed certificate. | bool | `false` | -| `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. | object | `{}` | +| `certGenerator` | The certGenerator section contains the configuration for the cert-generator Job. 
| object | `{"agentTLSSecretName":"agent-tls","annotations":{},"overwrite":false,"serverTLSSecretName":"server-tls"}` | +| `certGenerator.agentTLSSecretName` | The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely communicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"agent-tls"` | +| `certGenerator.annotations` | The annotations of the cert-generator Job. | object | `{}` | +| `certGenerator.overwrite` | Overwrite existing TLS Secrets on startup. | bool | `false` | +| `certGenerator.serverTLSSecretName` | The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"server-tls"` | +| `clusterDomain` | The DNS cluster domain of your Kubernetes cluster. | string | `"cluster.local"` | +| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | +| `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. 
| object | `{}` | +| `nginx.container` | The container configuration for the NGINX container. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.debug` | Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. | bool | `false` | -| `nginx.extraVolumeMounts` | extraVolumeMounts are the additional volume mounts for the nginx container. | list | `[]` | -| `nginx.image.pullPolicy` | | string | `"Always"` | | `nginx.image.repository` | The NGINX image to use. | string | `"ghcr.io/nginx/nginx-gateway-fabric/nginx"` | -| `nginx.image.tag` | | string | `"edge"` | -| `nginx.lifecycle` | The lifecycle of the nginx container. | object | `{}` | -| `nginx.plus` | Is NGINX Plus image being used | bool | `false` | +| `nginx.imagePullSecret` | The name of the secret containing docker registry credentials. Secret must exist in the same namespace as the helm release. The control plane will copy this secret into any namespace where NGINX is deployed. | string | `""` | +| `nginx.imagePullSecrets` | A list of secret names containing docker registry credentials. Secrets must exist in the same namespace as the helm release. The control plane will copy these secrets into any namespace where NGINX is deployed. | list | `[]` | +| `nginx.kind` | The kind of NGINX deployment. | string | `"deployment"` | +| `nginx.plus` | Is NGINX Plus image being used. | bool | `false` | +| `nginx.pod` | The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | +| `nginx.replicas` | The number of replicas of the NGINX Deployment. | int | `1` | +| `nginx.service` | The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. 
| object | `{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"}` | +| `nginx.service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | +| `nginx.service.loadBalancerClass` | LoadBalancerClass is the class of the load balancer implementation this Service belongs to. Requires nginx.service.type set to LoadBalancer. | string | `""` | +| `nginx.service.loadBalancerIP` | The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. | string | `""` | +| `nginx.service.loadBalancerSourceRanges` | The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. | list | `[]` | +| `nginx.service.nodePorts` | A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. The default NodePort range enforced by Kubernetes is 30000-32767. | list | `[]` | +| `nginx.service.type` | The type of service to create for the NGINX data plane. | string | `"LoadBalancer"` | | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.endpoint` | The endpoint of the NGINX Plus usage reporting server. Default: product.connect.nginx.com | string | `""` | | `nginx.usage.resolver` | The nameserver used to resolve the NGINX Plus usage reporting endpoint. 
Used with NGINX Instance Manager. | string | `""` | | `nginx.usage.secretName` | The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"nplus-license"` | | `nginx.usage.skipVerify` | Disable client verification of the NGINX Plus usage reporting server certificate. | bool | `false` | +| `nginxGateway` | The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. | object | `{"affinity":{},"config":{"logging":{"level":"info"}},"configAnnotations":{},"extraVolumeMounts":[],"extraVolumes":[],"gatewayClassAnnotations":{},"gatewayClassName":"nginx","gatewayControllerName":"gateway.nginx.org/nginx-gateway-controller","gwAPIExperimentalFeatures":{"enable":false},"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"},"kind":"deployment","labels":{},"leaderElection":{"enable":true,"lockName":""},"lifecycle":{},"metrics":{"enable":true,"port":9113,"secure":false},"nodeSelector":{},"podAnnotations":{},"productTelemetry":{"enable":true},"readinessProbe":{"enable":true,"initialDelaySeconds":3,"port":8081},"replicas":1,"resources":{},"service":{"annotations":{}},"serviceAccount":{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""},"snippetsFilters":{"enable":false},"terminationGracePeriodSeconds":30,"tolerations":[],"topologySpreadConstraints":[]}` | +| `nginxGateway.affinity` | The affinity of the NGINX Gateway Fabric control plane pod. | object | `{}` | | `nginxGateway.config.logging.level` | Log level. | string | `"info"` | | `nginxGateway.configAnnotations` | Set of custom annotations for NginxGateway objects. | object | `{}` | | `nginxGateway.extraVolumeMounts` | extraVolumeMounts are the additional volume mounts for the nginx-gateway container. 
| list | `[]` | +| `nginxGateway.extraVolumes` | extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with nginxGateway.extraVolumeMounts to mount additional volumes to the container. | list | `[]` | +| `nginxGateway.gatewayClassAnnotations` | Set of custom annotations for GatewayClass objects. | object | `{}` | | `nginxGateway.gatewayClassName` | The name of the GatewayClass that will be created as part of this release. Every NGINX Gateway Fabric must have a unique corresponding GatewayClass resource. NGINX Gateway Fabric only processes resources that belong to its class - i.e. have the "gatewayClassName" field resource equal to the class. | string | `"nginx"` | | `nginxGateway.gatewayControllerName` | The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain is gateway.nginx.org. | string | `"gateway.nginx.org/nginx-gateway-controller"` | | `nginxGateway.gwAPIExperimentalFeatures.enable` | Enable the experimental features of Gateway API which are supported by NGINX Gateway Fabric. Requires the Gateway APIs installed from the experimental channel. | bool | `false` | -| `nginxGateway.image.pullPolicy` | | string | `"Always"` | +| `nginxGateway.image` | The image configuration for the NGINX Gateway Fabric control plane. | object | `{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"}` | | `nginxGateway.image.repository` | The NGINX Gateway Fabric image to use | string | `"ghcr.io/nginx/nginx-gateway-fabric"` | -| `nginxGateway.image.tag` | | string | `"edge"` | | `nginxGateway.kind` | The kind of the NGINX Gateway Fabric installation - currently, only deployment is supported. | string | `"deployment"` | | `nginxGateway.labels` | Set of labels to be added for NGINX Gateway Fabric deployment. | object | `{}` | | `nginxGateway.leaderElection.enable` | Enable leader election. 
Leader election is used to avoid multiple replicas of the NGINX Gateway Fabric reporting the status of the Gateway API resources. If not enabled, all replicas of NGINX Gateway Fabric will update the statuses of the Gateway API resources. | bool | `true` | | `nginxGateway.leaderElection.lockName` | The name of the leader election lock. A Lease object with this name will be created in the same Namespace as the controller. | string | Autogenerated if not set or set to "". | | `nginxGateway.lifecycle` | The lifecycle of the nginx-gateway container. | object | `{}` | +| `nginxGateway.metrics.enable` | Enable exposing metrics in the Prometheus format. | bool | `true` | +| `nginxGateway.metrics.port` | Set the port where the Prometheus metrics are exposed. | int | `9113` | +| `nginxGateway.metrics.secure` | Enable serving metrics via https. By default metrics are served via http. Please note that this endpoint will be secured with a self-signed certificate. | bool | `false` | +| `nginxGateway.nodeSelector` | The nodeSelector of the NGINX Gateway Fabric control plane pod. | object | `{}` | | `nginxGateway.podAnnotations` | Set of custom annotations for the NGINX Gateway Fabric pods. | object | `{}` | | `nginxGateway.productTelemetry.enable` | Enable the collection of product telemetry. | bool | `true` | | `nginxGateway.readinessProbe.enable` | Enable the /readyz endpoint on the control plane. | bool | `true` | | `nginxGateway.readinessProbe.initialDelaySeconds` | The number of seconds after the Pod has started before the readiness probes are initiated. | int | `3` | | `nginxGateway.readinessProbe.port` | Port in which the readiness endpoint is exposed. | int | `8081` | -| `nginxGateway.replicaCount` | The number of replicas of the NGINX Gateway Fabric Deployment. | int | `1` | +| `nginxGateway.replicas` | The number of replicas of the NGINX Gateway Fabric Deployment. 
| int | `1` | | `nginxGateway.resources` | The resource requests and/or limits of the nginx-gateway container. | object | `{}` | -| `nginxGateway.securityContext.allowPrivilegeEscalation` | Some environments may need this set to true in order for the control plane to successfully reload NGINX. | bool | `false` | +| `nginxGateway.service` | The service configuration for the NGINX Gateway Fabric control plane. | object | `{"annotations":{}}` | +| `nginxGateway.service.annotations` | The annotations of the NGINX Gateway Fabric control plane service. | object | `{}` | +| `nginxGateway.serviceAccount` | The serviceaccount configuration for the NGINX Gateway Fabric control plane. | object | `{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""}` | +| `nginxGateway.serviceAccount.annotations` | Set of custom annotations for the NGINX Gateway Fabric control plane service account. | object | `{}` | +| `nginxGateway.serviceAccount.imagePullSecret` | The name of the secret containing docker registry credentials for the control plane. Secret must exist in the same namespace as the helm release. | string | `""` | +| `nginxGateway.serviceAccount.imagePullSecrets` | A list of secret names containing docker registry credentials for the control plane. Secrets must exist in the same namespace as the helm release. | list | `[]` | +| `nginxGateway.serviceAccount.name` | The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC. | string | Autogenerated if not set or set to "" | | `nginxGateway.snippetsFilters.enable` | Enable SnippetsFilters feature. SnippetsFilters allow inserting NGINX configuration into the generated NGINX config for HTTPRoute and GRPCRoute resources. | bool | `false` | -| `nodeSelector` | The nodeSelector of the NGINX Gateway Fabric pod. | object | `{}` | -| `service.annotations` | The annotations of the NGINX Gateway Fabric service. 
| object | `{}` | -| `service.create` | Creates a service to expose the NGINX Gateway Fabric pods. | bool | `true` | -| `service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | -| `service.loadBalancerIP` | The static IP address for the load balancer. Requires service.type set to LoadBalancer. | string | `""` | -| `service.loadBalancerSourceRanges` | The IP ranges (CIDR) that are allowed to access the load balancer. Requires service.type set to LoadBalancer. | list | `[]` | -| `service.ports` | A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. | list | `[{"name":"http","port":80,"protocol":"TCP","targetPort":80},{"name":"https","port":443,"protocol":"TCP","targetPort":443}]` | -| `service.type` | The type of service to create for the NGINX Gateway Fabric. | string | `"LoadBalancer"` | -| `serviceAccount.annotations` | Set of custom annotations for the NGINX Gateway Fabric service account. | object | `{}` | -| `serviceAccount.imagePullSecret` | The name of the secret containing docker registry credentials. Secret must exist in the same namespace as the helm release. | string | `""` | -| `serviceAccount.imagePullSecrets` | A list of secret names containing docker registry credentials. Secrets must exist in the same namespace as the helm release. | list | `[]` | -| `serviceAccount.name` | The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC. | string | Autogenerated if not set or set to "" | -| `terminationGracePeriodSeconds` | The termination grace period of the NGINX Gateway Fabric pod. | int | `30` | -| `tolerations` | Tolerations for the NGINX Gateway Fabric pod. | list | `[]` | -| `topologySpreadConstraints` | The topology spread constraints for the NGINX Gateway Fabric pod. 
| list | `[]` | +| `nginxGateway.terminationGracePeriodSeconds` | The termination grace period of the NGINX Gateway Fabric control plane pod. | int | `30` | +| `nginxGateway.tolerations` | Tolerations for the NGINX Gateway Fabric control plane pod. | list | `[]` | +| `nginxGateway.topologySpreadConstraints` | The topology spread constraints for the NGINX Gateway Fabric control plane pod. | list | `[]` | ---------------------------------------------- Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/helm-docs) diff --git a/charts/nginx-gateway-fabric/README.md.gotmpl b/charts/nginx-gateway-fabric/README.md.gotmpl index f89de6bd00..f757a7cc8f 100644 --- a/charts/nginx-gateway-fabric/README.md.gotmpl +++ b/charts/nginx-gateway-fabric/README.md.gotmpl @@ -110,13 +110,7 @@ By default, the NGINX Gateway Fabric helm chart deploys a LoadBalancer Service. To use a NodePort Service instead: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.type=NodePort -``` - -To disable the creation of a Service: - -```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.create=false +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` ## Upgrading the Chart @@ -251,6 +245,9 @@ kubectl kustomize https://github.com/nginx/nginx-gateway-fabric/config/crd/gatew The following table lists the configurable parameters of the NGINX Gateway Fabric chart and their default values. +> More granular configuration options may not show up in this table. +> Viewing the `values.yaml` file directly can show all available options. + {{ template "chart.valuesTable" . 
}} ---------------------------------------------- diff --git a/charts/nginx-gateway-fabric/templates/_helpers.tpl b/charts/nginx-gateway-fabric/templates/_helpers.tpl index 65b6c5e6f8..01155eb707 100644 --- a/charts/nginx-gateway-fabric/templates/_helpers.tpl +++ b/charts/nginx-gateway-fabric/templates/_helpers.tpl @@ -78,7 +78,7 @@ app.kubernetes.io/instance: {{ .Release.Name }} Create the name of the ServiceAccount to use */}} {{- define "nginx-gateway.serviceAccountName" -}} -{{- default (include "nginx-gateway.fullname" .) .Values.serviceAccount.name }} +{{- default (include "nginx-gateway.fullname" .) .Values.nginxGateway.serviceAccount.name }} {{- end }} {{/* @@ -91,3 +91,16 @@ Expand leader election lock name. {{- printf "%s-%s" (include "nginx-gateway.fullname" .) "leader-election" -}} {{- end -}} {{- end -}} + +{{/* +Filters out empty fields from a struct. +*/}} +{{- define "filterEmptyFields" -}} +{{- $result := dict }} +{{- range $key, $value := . }} + {{- if and (not (empty $value)) (not (and (kindIs "slice" $value) (eq (len $value) 0))) }} + {{- $result = merge $result (dict $key $value) }} + {{- end }} +{{- end }} +{{- $result | toYaml }} +{{- end }} diff --git a/charts/nginx-gateway-fabric/templates/certs-job.yaml b/charts/nginx-gateway-fabric/templates/certs-job.yaml new file mode 100644 index 0000000000..96da6289e2 --- /dev/null +++ b/charts/nginx-gateway-fabric/templates/certs-job.yaml @@ -0,0 +1,156 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": pre-install +{{- if or .Values.nginxGateway.serviceAccount.imagePullSecret .Values.nginxGateway.serviceAccount.imagePullSecrets }} +imagePullSecrets: + {{- if .Values.nginxGateway.serviceAccount.imagePullSecret }} + - name: {{ .Values.nginxGateway.serviceAccount.imagePullSecret }} + {{- end }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecrets }} + {{- range .Values.nginxGateway.serviceAccount.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "nginx-gateway.fullname" . }}-cert-generator +subjects: +- kind: ServiceAccount + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} +--- +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ include "nginx-gateway.scc-name" . }}-cert-generator + labels: + {{- include "nginx-gateway.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook-weight": "-1" + "helm.sh/hook": pre-install +allowPrivilegeEscalation: false +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: true +runAsUser: + type: MustRunAsRange + uidRangeMin: 101 + uidRangeMax: 101 +fsGroup: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +supplementalGroups: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +users: +- {{ printf "system:serviceaccount:%s:%s-cert-generator" .Release.Namespace (include "nginx-gateway.fullname" .) }} +requiredDropCapabilities: +- ALL +volumes: +- projected +--- +{{- end }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + {{- with .Values.certGenerator.annotations -}} + {{ toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": pre-install, pre-upgrade +spec: + template: + metadata: + annotations: + {{- with .Values.certGenerator.annotations -}} + {{ toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - args: + - generate-certs + - --service={{ include "nginx-gateway.fullname" . 
}} + - --cluster-domain={{ .Values.clusterDomain }} + - --server-tls-secret={{ .Values.certGenerator.serverTLSSecretName }} + - --agent-tls-secret={{ .Values.certGenerator.agentTLSSecretName }} + {{- if .Values.certGenerator.overwrite }} + - --overwrite + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} + imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} + name: cert-generator + securityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 101 + runAsGroup: 1001 + restartPolicy: Never + serviceAccountName: {{ include "nginx-gateway.fullname" . }}-cert-generator + securityContext: + fsGroup: 1001 + runAsNonRoot: true + ttlSecondsAfterFinished: 0 diff --git a/charts/nginx-gateway-fabric/templates/clusterrole.yaml b/charts/nginx-gateway-fabric/templates/clusterrole.yaml index 9ee1be4254..6266134602 100644 --- a/charts/nginx-gateway-fabric/templates/clusterrole.yaml +++ b/charts/nginx-gateway-fabric/templates/clusterrole.yaml @@ -7,39 +7,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets -{{- if .Values.nginxGateway.gwAPIExperimentalFeatures.enable }} - configmaps -{{- end }} + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch -{{- if or .Values.nginxGateway.productTelemetry.enable .Values.nginx.plus }} - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get -{{- end }} -{{- if .Values.nginx.plus }} -- apiGroups: - - apps - resources: - - replicasets - verbs: - list -{{- end }} {{- if or .Values.nginxGateway.productTelemetry.enable .Values.nginx.plus }} - apiGroups: - "" @@ -62,6 +59,12 @@ 
rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -147,6 +150,19 @@ rules: - securitycontextconstraints resourceNames: - {{ include "nginx-gateway.scc-name" . }} + - {{ include "nginx-gateway.scc-name" . }}-nginx verbs: - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - update + - delete + - list + - get + - watch {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/configmap.yaml b/charts/nginx-gateway-fabric/templates/configmap.yaml deleted file mode 100644 index 8b99c60650..0000000000 --- a/charts/nginx-gateway-fabric/templates/configmap.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-includes-bootstrap - namespace: {{ .Release.Namespace }} - labels: - {{- include "nginx-gateway.labels" . | nindent 4 }} -data: - main.conf: | - {{- if and .Values.nginx.config .Values.nginx.config.logging .Values.nginx.config.logging.errorLevel }} - error_log stderr {{ .Values.nginx.config.logging.errorLevel }}; - {{ else }} - error_log stderr info; - {{- end }} - {{- if .Values.nginx.plus }} - mgmt.conf: | - mgmt { - {{- if .Values.nginx.usage.endpoint }} - usage_report endpoint={{ .Values.nginx.usage.endpoint }}; - {{- end }} - {{- if .Values.nginx.usage.skipVerify }} - ssl_verify off; - {{- end }} - {{- if .Values.nginx.usage.caSecretName }} - ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; - {{- end }} - {{- if .Values.nginx.usage.clientSSLSecretName }} - ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; - ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; - {{- end }} - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } - {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index 
25d0547f3a..8fee4b36f2 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -10,7 +10,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: - replicas: {{ .Values.nginxGateway.replicaCount }} + replicas: {{ .Values.nginxGateway.replicas }} selector: matchLabels: {{- include "nginx-gateway.selectorLabels" . | nindent 6 }} @@ -21,68 +21,36 @@ spec: {{- with .Values.nginxGateway.labels }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if or .Values.nginxGateway.podAnnotations .Values.metrics.enable }} + {{- if or .Values.nginxGateway.podAnnotations .Values.nginxGateway.metrics.enable }} annotations: {{- if .Values.nginxGateway.podAnnotations }} {{- toYaml .Values.nginxGateway.podAnnotations | nindent 8 }} {{- end }} - {{- if .Values.metrics.enable }} + {{- if .Values.nginxGateway.metrics.enable }} prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.port }}" - {{- if .Values.metrics.secure }} + prometheus.io/port: "{{ .Values.nginxGateway.metrics.port }}" + {{- if .Values.nginxGateway.metrics.secure }} prometheus.io/scheme: "https" {{- end }} {{- end }} {{- end }} spec: - {{- if .Values.topologySpreadConstraints }} - topologySpreadConstraints: - {{- toYaml .Values.topologySpreadConstraints | nindent 8 }} - {{- end }} - initContainers: - - name: init - image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} - imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - {{- if .Values.nginx.plus }} - - --source - - /includes/mgmt.conf - - --nginx-plus - {{- end }} - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - KILL # Set because the binary has CAP_KILL for the main 
controller process. Not used by init. - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-includes-bootstrap - mountPath: /includes - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes containers: - args: - - static-mode + - controller - --gateway-ctlr-name={{ .Values.nginxGateway.gatewayControllerName }} - --gatewayclass={{ .Values.nginxGateway.gatewayClassName }} - --config={{ include "nginx-gateway.config-name" . }} - --service={{ include "nginx-gateway.fullname" . }} + - --agent-tls-secret={{ .Values.certGenerator.agentTLSSecretName }} + {{- if .Values.nginx.imagePullSecret }} + - --nginx-docker-secret={{ .Values.nginx.imagePullSecret }} + {{- end }} + {{- if .Values.nginx.imagePullSecrets }} + {{- range .Values.nginx.imagePullSecrets }} + - --nginx-docker-secret={{ . }} + {{- end }} + {{- end }} {{- if .Values.nginx.plus }} - --nginx-plus {{- if .Values.nginx.usage.secretName }} @@ -104,9 +72,9 @@ spec: - --usage-report-client-ssl-secret={{ .Values.nginx.usage.clientSSLSecretName }} {{- end }} {{- end }} - {{- if .Values.metrics.enable }} - - --metrics-port={{ .Values.metrics.port }} - {{- if .Values.metrics.secure }} + {{- if .Values.nginxGateway.metrics.enable }} + - --metrics-port={{ .Values.nginxGateway.metrics.port }} + {{- if .Values.nginxGateway.metrics.secure }} - --metrics-secure-serving {{- end }} {{- else }} @@ -131,11 +99,10 @@ spec: {{- if .Values.nginxGateway.snippetsFilters.enable }} - --snippets-filters {{- end }} + {{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} + - --nginx-scc={{ include "nginx-gateway.scc-name" . 
}}-nginx + {{- end}} env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -148,6 +115,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} name: nginx-gateway @@ -160,9 +133,11 @@ spec: {{- toYaml .Values.nginxGateway.resources | nindent 10 }} {{- end }} ports: - {{- if .Values.metrics.enable }} + - name: agent-grpc + containerPort: 8443 + {{- if .Values.nginxGateway.metrics.enable }} - name: metrics - containerPort: {{ .Values.metrics.port }} + containerPort: {{ .Values.nginxGateway.metrics.port }} {{- end }} {{- if .Values.nginxGateway.readinessProbe.enable }} - name: health @@ -177,151 +152,45 @@ spec: securityContext: seccompProfile: type: RuntimeDefault - allowPrivilegeEscalation: {{ .Values.nginxGateway.securityContext.allowPrivilegeEscalation }} capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - {{- with .Values.nginxGateway.extraVolumeMounts -}} - {{ toYaml . 
| nindent 8 }} - {{- end }} - - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default .Chart.AppVersion }} - imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} - name: nginx - {{- if .Values.nginx.lifecycle }} - lifecycle: - {{- toYaml .Values.nginx.lifecycle | nindent 10 }} - {{- end }} - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL + allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 101 runAsGroup: 1001 volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-cache - mountPath: /var/cache/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - {{- if .Values.nginx.plus }} - - name: nginx-lib - mountPath: /var/lib/nginx/state - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license - mountPath: /etc/nginx/license.jwt - subPath: license.jwt - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: nginx-plus-usage-certs - mountPath: /etc/nginx/certs-bootstrap/ - {{- end }} - {{- end }} - {{- with .Values.nginx.extraVolumeMounts -}} + - name: nginx-agent-tls + mountPath: /var/run/secrets/ngf + {{- with .Values.nginxGateway.extraVolumeMounts -}} {{ toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.nginx.debug }} - command: - - "/bin/sh" - args: - - "-c" - - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" - {{- end }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- if .Values.affinity }} + {{- if .Values.nginxGateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml .Values.nginxGateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.nginxGateway.terminationGracePeriodSeconds }} + {{- if .Values.nginxGateway.affinity }} affinity: - {{- toYaml .Values.affinity | nindent 8 }} + {{- toYaml .Values.nginxGateway.affinity | nindent 8 }} {{- end }} serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . }} - shareProcessNamespace: true securityContext: fsGroup: 1001 runAsNonRoot: true - {{- if .Values.tolerations }} + {{- if .Values.nginxGateway.tolerations }} tolerations: - {{- toYaml .Values.tolerations | nindent 6 }} + {{- toYaml .Values.nginxGateway.tolerations | nindent 6 }} {{- end }} - {{- if .Values.nodeSelector }} + {{- if .Values.nginxGateway.nodeSelector }} nodeSelector: - {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- toYaml .Values.nginxGateway.nodeSelector | nindent 8 }} {{- end }} volumes: - - name: nginx-conf - emptyDir: {} - - name: nginx-stream-conf - emptyDir: {} - - name: nginx-main-includes - emptyDir: {} - - name: nginx-secrets - emptyDir: {} - - name: nginx-run - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-includes - emptyDir: {} - - name: nginx-includes-bootstrap - configMap: - name: nginx-includes-bootstrap - {{- if .Values.nginx.plus }} - - name: nginx-lib - emptyDir: {} - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license + - name: nginx-agent-tls secret: - secretName: {{ .Values.nginx.usage.secretName }} - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: 
nginx-plus-usage-certs - projected: - sources: - {{- if .Values.nginx.usage.caSecretName }} - - secret: - name: {{ .Values.nginx.usage.caSecretName }} - {{- end }} - {{- if .Values.nginx.usage.clientSSLSecretName }} - - secret: - name: {{ .Values.nginx.usage.clientSSLSecretName }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.extraVolumes -}} + secretName: {{ .Values.certGenerator.serverTLSSecretName }} + {{- with .Values.nginxGateway.extraVolumes -}} {{ toYaml . | nindent 6 }} {{- end }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml index ee08e1a726..b6905cd33c 100644 --- a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml +++ b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml @@ -12,9 +12,8 @@ metadata: {{- end }} spec: controllerName: {{ .Values.nginxGateway.gatewayControllerName }} - {{- if .Values.nginx.config }} parametersRef: group: gateway.nginx.org kind: NginxProxy name: {{ include "nginx-gateway.proxy-config-name" . }} - {{- end }} + namespace: {{ .Release.Namespace }} diff --git a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml index 4214158b75..56e4de6943 100644 --- a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml +++ b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml @@ -1,10 +1,35 @@ -{{- if .Values.nginx.config }} -apiVersion: gateway.nginx.org/v1alpha1 +apiVersion: gateway.nginx.org/v1alpha2 kind: NginxProxy metadata: name: {{ include "nginx-gateway.proxy-config-name" . }} + namespace: {{ .Release.Namespace }} labels: {{- include "nginx-gateway.labels" . 
| nindent 4 }} spec: + {{- if .Values.nginx.config }} {{- toYaml .Values.nginx.config | nindent 2 }} -{{- end }} + {{- end }} + kubernetes: + {{- if eq .Values.nginx.kind "deployment" }} + deployment: + replicas: {{ .Values.nginx.replicas }} + {{- if .Values.nginx.pod }} + pod: + {{- toYaml .Values.nginx.pod | nindent 8 }} + {{- end }} + container: + {{- if .Values.nginx.container }} + {{- toYaml .Values.nginx.container | nindent 8 }} + {{- end }} + image: + {{- toYaml .Values.nginx.image | nindent 10 }} + {{- if .Values.nginx.debug }} + debug: {{ .Values.nginx.debug }} + {{- end }} + {{- end }} + {{- if .Values.nginx.service }} + service: + {{- with .Values.nginx.service }} + {{- include "filterEmptyFields" . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/scc.yaml b/charts/nginx-gateway-fabric/templates/scc.yaml index 8156b279b7..783300c3fe 100644 --- a/charts/nginx-gateway-fabric/templates/scc.yaml +++ b/charts/nginx-gateway-fabric/templates/scc.yaml @@ -3,7 +3,9 @@ kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 metadata: name: {{ include "nginx-gateway.scc-name" . }} -allowPrivilegeEscalation: {{ .Values.nginxGateway.securityContext.allowPrivilegeEscalation }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} +allowPrivilegeEscalation: false allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false @@ -14,7 +16,7 @@ readOnlyRootFilesystem: true runAsUser: type: MustRunAsRange uidRangeMin: 101 - uidRangeMax: 102 + uidRangeMax: 101 fsGroup: type: MustRunAs ranges: @@ -29,16 +31,51 @@ seLinuxContext: type: MustRunAs seccompProfiles: - runtime/default -volumes: -- emptyDir -- secret -- configMap -- projected users: - {{ printf "system:serviceaccount:%s:%s" .Release.Namespace (include "nginx-gateway.serviceAccountName" .) 
}} +requiredDropCapabilities: +- ALL +volumes: +- secret +--- +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ include "nginx-gateway.scc-name" . }}-nginx + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: true +runAsUser: + type: MustRunAsRange + uidRangeMin: 101 + uidRangeMax: 101 +fsGroup: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +supplementalGroups: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default allowedCapabilities: - NET_BIND_SERVICE -- KILL requiredDropCapabilities: - ALL +volumes: +- emptyDir +- secret +- configMap +- projected {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/service.yaml b/charts/nginx-gateway-fabric/templates/service.yaml index a80686dc7e..6a0ed7cfef 100644 --- a/charts/nginx-gateway-fabric/templates/service.yaml +++ b/charts/nginx-gateway-fabric/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.service.create }} apiVersion: v1 kind: Service metadata: @@ -6,30 +5,16 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "nginx-gateway.labels" . 
| nindent 4 }} -{{- if .Values.service.annotations }} +{{- if .Values.nginxGateway.service.annotations }} annotations: -{{ toYaml .Values.service.annotations | indent 4 }} +{{ toYaml .Values.nginxGateway.service.annotations | indent 4 }} {{- end }} spec: -{{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} - {{- if .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} - {{- end }} -{{- end }} - type: {{ .Values.service.type }} -{{- if eq .Values.service.type "LoadBalancer" }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} - {{- end }} -{{- end}} + type: ClusterIP selector: {{- include "nginx-gateway.selectorLabels" . | nindent 4 }} - ports: # Update the following ports to match your Gateway Listener ports -{{- if .Values.service.ports }} -{{ toYaml .Values.service.ports | indent 2 }} -{{ end }} -{{- end }} + ports: + - name: agent-grpc + port: 443 + protocol: TCP + targetPort: 8443 diff --git a/charts/nginx-gateway-fabric/templates/serviceaccount.yaml b/charts/nginx-gateway-fabric/templates/serviceaccount.yaml index 069a2066b9..fa3439759d 100644 --- a/charts/nginx-gateway-fabric/templates/serviceaccount.yaml +++ b/charts/nginx-gateway-fabric/templates/serviceaccount.yaml @@ -6,14 +6,14 @@ metadata: labels: {{- include "nginx-gateway.labels" . 
| nindent 4 }} annotations: - {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} -{{- if or .Values.serviceAccount.imagePullSecret .Values.serviceAccount.imagePullSecrets }} + {{- toYaml .Values.nginxGateway.serviceAccount.annotations | nindent 4 }} +{{- if or .Values.nginxGateway.serviceAccount.imagePullSecret .Values.nginxGateway.serviceAccount.imagePullSecrets }} imagePullSecrets: - {{- if .Values.serviceAccount.imagePullSecret }} - - name: {{ .Values.serviceAccount.imagePullSecret }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecret }} + - name: {{ .Values.nginxGateway.serviceAccount.imagePullSecret }} {{- end }} - {{- if .Values.serviceAccount.imagePullSecrets }} - {{- range .Values.serviceAccount.imagePullSecrets }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecrets }} + {{- range .Values.nginxGateway.serviceAccount.imagePullSecrets }} - name: {{ . }} {{- end }} {{- end }} diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 5ef6236304..67250a526f 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -1,61 +1,59 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { - "affinity": { - "description": "The affinity of the NGINX Gateway Fabric pod.", - "required": [], - "title": "affinity", - "type": "object" - }, - "extraVolumes": { - "description": "extraVolumes for the NGINX Gateway Fabric pod. 
Use in conjunction with\nnginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers.", - "items": { - "required": [] - }, - "required": [], - "title": "extraVolumes", - "type": "array" - }, - "global": { - "description": "Global values are values that can be accessed from any chart or subchart by exactly the same name.", - "required": [], - "title": "global", - "type": "object" - }, - "metrics": { + "certGenerator": { + "description": "The certGenerator section contains the configuration for the cert-generator Job.", "properties": { - "enable": { - "default": true, - "description": "Enable exposing metrics in the Prometheus format.", + "agentTLSSecretName": { + "default": "agent-tls", + "description": "The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely\ncommunicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the\nNGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).", "required": [], - "title": "enable", - "type": "boolean" + "title": "agentTLSSecretName", + "type": "string" }, - "port": { - "default": 9113, - "description": "Set the port where the Prometheus metrics are exposed.", - "maximum": 65535, - "minimum": 1, + "annotations": { + "description": "The annotations of the cert-generator Job.", "required": [], - "title": "port", - "type": "integer" + "title": "annotations", + "type": "object" }, - "secure": { + "overwrite": { "default": false, - "description": "Enable serving metrics via https. 
By default metrics are served via http.\nPlease note that this endpoint will be secured with a self-signed certificate.", + "description": "Overwrite existing TLS Secrets on startup.", "required": [], - "title": "secure", + "title": "overwrite", "type": "boolean" + }, + "serverTLSSecretName": { + "default": "server-tls", + "description": "The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane\nto securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric\ncontrol plane is running in (default namespace: nginx-gateway).", + "required": [], + "title": "serverTLSSecretName", + "type": "string" } }, "required": [], - "title": "metrics", + "title": "certGenerator", + "type": "object" + }, + "clusterDomain": { + "default": "cluster.local", + "description": "The DNS cluster domain of your Kubernetes cluster.", + "required": [], + "title": "clusterDomain", + "type": "string" + }, + "global": { + "description": "Global values are values that can be accessed from any chart or subchart by exactly the same name.", + "required": [], + "title": "global", "type": "object" }, "nginx": { + "description": "The nginx section contains the configuration for all NGINX data plane deployments\ninstalled by the NGINX Gateway Fabric control plane.", "properties": { "config": { - "description": "The configuration for the data plane that is contained in the NginxProxy resource.", + "description": "The configuration for the data plane that is contained in the NginxProxy resource. 
This is applied globally to all Gateways\nmanaged by this instance of NGINX Gateway Fabric.", "properties": { "disableHTTP2": { "description": "DisableHTTP2 defines if http2 should be disabled for all servers.", @@ -75,6 +73,17 @@ "logging": { "description": "Logging defines logging related settings for NGINX.", "properties": { + "agentLevel": { + "enum": [ + "debug", + "info", + "error", + "panic", + "fatal" + ], + "required": [], + "type": "string" + }, "errorLevel": { "enum": [ "debug", @@ -93,6 +102,23 @@ "required": [], "type": "object" }, + "metrics": { + "description": "Metrics defines the configuration for Prometheus scraping metrics.", + "properties": { + "disable": { + "required": [], + "type": "boolean" + }, + "port": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + } + }, + "required": [], + "type": "object" + }, "nginxPlus": { "description": "NginxPlus specifies NGINX Plus additional settings.", "properties": { @@ -165,6 +191,17 @@ "telemetry": { "description": "Telemetry specifies the OpenTelemetry configuration.", "properties": { + "disabledFeatures": { + "items": { + "enum": [ + "DisableTracing" + ], + "required": [], + "type": "string" + }, + "required": [], + "type": "array" + }, "exporter": { "properties": { "batchCount": { @@ -228,6 +265,12 @@ "title": "config", "type": "object" }, + "container": { + "description": "The container configuration for the NGINX container. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", + "required": [], + "title": "container", + "type": "object" + }, "debug": { "default": false, "description": "Enable debugging for NGINX. Uses the nginx-debug binary. 
The NGINX error log level should be set to debug in the NginxProxy resource.", @@ -235,15 +278,6 @@ "title": "debug", "type": "boolean" }, - "extraVolumeMounts": { - "description": "extraVolumeMounts are the additional volume mounts for the nginx container.", - "items": { - "required": [] - }, - "required": [], - "title": "extraVolumeMounts", - "type": "array" - }, "image": { "properties": { "pullPolicy": { @@ -274,19 +308,127 @@ "title": "image", "type": "object" }, - "lifecycle": { - "description": "The lifecycle of the nginx container.", + "imagePullSecret": { + "default": "", + "description": "The name of the secret containing docker registry credentials.\nSecret must exist in the same namespace as the helm release. The control\nplane will copy this secret into any namespace where NGINX is deployed.", "required": [], - "title": "lifecycle", - "type": "object" + "title": "imagePullSecret", + "type": "string" + }, + "imagePullSecrets": { + "description": "A list of secret names containing docker registry credentials.\nSecrets must exist in the same namespace as the helm release. The control\nplane will copy these secrets into any namespace where NGINX is deployed.", + "items": { + "required": [] + }, + "required": [], + "title": "imagePullSecrets", + "type": "array" + }, + "kind": { + "default": "deployment", + "description": "The kind of NGINX deployment.", + "enum": [ + "deployment" + ], + "required": [], + "title": "kind" }, "plus": { "default": false, - "description": "Is NGINX Plus image being used", + "description": "Is NGINX Plus image being used.", "required": [], "title": "plus", "type": "boolean" }, + "pod": { + "description": "The pod configuration for the NGINX data plane pod. 
This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", + "required": [], + "title": "pod", + "type": "object" + }, + "replicas": { + "default": 1, + "description": "The number of replicas of the NGINX Deployment.", + "required": [], + "title": "replicas", + "type": "integer" + }, + "service": { + "description": "The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", + "properties": { + "externalTrafficPolicy": { + "default": "Local", + "description": "The externalTrafficPolicy of the service. The value Local preserves the client source IP.", + "enum": [ + "Cluster", + "Local" + ], + "required": [], + "title": "externalTrafficPolicy" + }, + "loadBalancerClass": { + "default": "", + "description": "LoadBalancerClass is the class of the load balancer implementation this Service belongs to.\nRequires nginx.service.type set to LoadBalancer.", + "required": [], + "title": "loadBalancerClass", + "type": "string" + }, + "loadBalancerIP": { + "default": "", + "description": "The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer.", + "required": [], + "title": "loadBalancerIP", + "type": "string" + }, + "loadBalancerSourceRanges": { + "description": "The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer.", + "items": { + "required": [] + }, + "required": [], + "title": "loadBalancerSourceRanges", + "type": "array" + }, + "nodePorts": { + "description": "A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port,\notherwise it will be ignored. 
The default NodePort range enforced by Kubernetes is 30000-32767.", + "items": { + "properties": { + "listenerPort": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + }, + "port": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + } + }, + "required": [], + "type": "object" + }, + "required": [], + "title": "nodePorts", + "type": "array" + }, + "type": { + "default": "LoadBalancer", + "description": "The type of service to create for the NGINX data plane.", + "enum": [ + "ClusterIP", + "NodePort", + "LoadBalancer" + ], + "required": [], + "title": "type" + } + }, + "required": [], + "title": "service", + "type": "object" + }, "usage": { "description": "Configuration for NGINX Plus usage reporting.", "properties": { @@ -343,7 +485,14 @@ "type": "object" }, "nginxGateway": { + "description": "The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment.", "properties": { + "affinity": { + "description": "The affinity of the NGINX Gateway Fabric control plane pod.", + "required": [], + "title": "affinity", + "type": "object" + }, "config": { "description": "The dynamic configuration for the control plane that is contained in the NginxGateway resource.", "properties": { @@ -385,6 +534,15 @@ "title": "extraVolumeMounts", "type": "array" }, + "extraVolumes": { + "description": "extraVolumes for the NGINX Gateway Fabric control plane pod. 
Use in conjunction with\nnginxGateway.extraVolumeMounts mount additional volumes to the container.", + "items": { + "required": [] + }, + "required": [], + "title": "extraVolumes", + "type": "array" + }, "gatewayClassAnnotations": { "description": "Set of custom annotations for GatewayClass objects.", "required": [], @@ -420,6 +578,7 @@ "type": "object" }, "image": { + "description": "The image configuration for the NGINX Gateway Fabric control plane.", "properties": { "pullPolicy": { "default": "Always", @@ -490,6 +649,42 @@ "title": "lifecycle", "type": "object" }, + "metrics": { + "properties": { + "enable": { + "default": true, + "description": "Enable exposing metrics in the Prometheus format.", + "required": [], + "title": "enable", + "type": "boolean" + }, + "port": { + "default": 9113, + "description": "Set the port where the Prometheus metrics are exposed.", + "maximum": 65535, + "minimum": 1, + "required": [], + "title": "port", + "type": "integer" + }, + "secure": { + "default": false, + "description": "Enable serving metrics via https. By default metrics are served via http.\nPlease note that this endpoint will be secured with a self-signed certificate.", + "required": [], + "title": "secure", + "type": "boolean" + } + }, + "required": [], + "title": "metrics", + "type": "object" + }, + "nodeSelector": { + "description": "The nodeSelector of the NGINX Gateway Fabric control plane pod.", + "required": [], + "title": "nodeSelector", + "type": "object" + }, "podAnnotations": { "description": "Set of custom annotations for the NGINX Gateway Fabric pods.", "required": [], @@ -511,7 +706,7 @@ "type": "object" }, "readinessProbe": { - "description": "# Defines the settings for the control plane readiness probe. This probe returns Ready when the controller\n# has started and configured NGINX to serve traffic.", + "description": "# Defines the settings for the control plane readiness probe. 
This probe returns Ready when the controller\n# has started and is ready to configure NGINX.", "properties": { "enable": { "default": true, @@ -541,11 +736,11 @@ "title": "readinessProbe", "type": "object" }, - "replicaCount": { + "replicas": { "default": 1, "description": "The number of replicas of the NGINX Gateway Fabric Deployment.", "required": [], - "title": "replicaCount", + "title": "replicas", "type": "integer" }, "resources": { @@ -554,18 +749,55 @@ "title": "resources", "type": "object" }, - "securityContext": { + "service": { + "description": "The service configuration for the NGINX Gateway Fabric control plane.", "properties": { - "allowPrivilegeEscalation": { - "default": false, - "description": "Some environments may need this set to true in order for the control plane to successfully reload NGINX.", + "annotations": { + "description": "The annotations of the NGINX Gateway Fabric control plane service.", "required": [], - "title": "allowPrivilegeEscalation", - "type": "boolean" + "title": "annotations", + "type": "object" } }, "required": [], - "title": "securityContext", + "title": "service", + "type": "object" + }, + "serviceAccount": { + "description": "The serviceaccount configuration for the NGINX Gateway Fabric control plane.", + "properties": { + "annotations": { + "description": "Set of custom annotations for the NGINX Gateway Fabric control plane service account.", + "required": [], + "title": "annotations", + "type": "object" + }, + "imagePullSecret": { + "default": "", + "description": "The name of the secret containing docker registry credentials for the control plane.\nSecret must exist in the same namespace as the helm release.", + "required": [], + "title": "imagePullSecret", + "type": "string" + }, + "imagePullSecrets": { + "description": "A list of secret names containing docker registry credentials for the control plane.\nSecrets must exist in the same namespace as the helm release.", + "items": { + "required": [] + }, + "required": 
[], + "title": "imagePullSecrets", + "type": "array" + }, + "name": { + "default": "", + "description": "The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC.", + "required": [], + "title": "name", + "type": "string" + } + }, + "required": [], + "title": "serviceAccount", "type": "object" }, "snippetsFilters": { @@ -581,174 +813,39 @@ "required": [], "title": "snippetsFilters", "type": "object" - } - }, - "required": [ - "gatewayClassName", - "gatewayControllerName" - ], - "title": "nginxGateway", - "type": "object" - }, - "nodeSelector": { - "description": "The nodeSelector of the NGINX Gateway Fabric pod.", - "required": [], - "title": "nodeSelector", - "type": "object" - }, - "service": { - "properties": { - "annotations": { - "description": "The annotations of the NGINX Gateway Fabric service.", - "required": [], - "title": "annotations", - "type": "object" - }, - "create": { - "default": true, - "description": "Creates a service to expose the NGINX Gateway Fabric pods.", - "required": [], - "title": "create", - "type": "boolean" - }, - "externalTrafficPolicy": { - "default": "Local", - "description": "The externalTrafficPolicy of the service. The value Local preserves the client source IP.", - "enum": [ - "Cluster", - "Local" - ], - "required": [], - "title": "externalTrafficPolicy" }, - "loadBalancerIP": { - "default": "", - "description": "The static IP address for the load balancer. Requires service.type set to LoadBalancer.", + "terminationGracePeriodSeconds": { + "default": 30, + "description": "The termination grace period of the NGINX Gateway Fabric control plane pod.", "required": [], - "title": "loadBalancerIP", - "type": "string" + "title": "terminationGracePeriodSeconds", + "type": "integer" }, - "loadBalancerSourceRanges": { - "description": "The IP ranges (CIDR) that are allowed to access the load balancer. 
Requires service.type set to LoadBalancer.", + "tolerations": { + "description": "Tolerations for the NGINX Gateway Fabric control plane pod.", "items": { "required": [] }, "required": [], - "title": "loadBalancerSourceRanges", - "type": "array" - }, - "ports": { - "description": "A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from\nyour Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports.", - "items": { - "properties": { - "name": { - "required": [], - "type": "string" - }, - "port": { - "maximum": 65535, - "minimum": 1, - "required": [], - "type": "integer" - }, - "protocol": { - "enum": [ - "TCP", - "UDP" - ], - "required": [], - "type": "string" - }, - "targetPort": { - "maximum": 65535, - "minimum": 1, - "required": [], - "type": "integer" - } - }, - "required": [], - "type": "object" - }, - "required": [], - "title": "ports", + "title": "tolerations", "type": "array" }, - "type": { - "default": "LoadBalancer", - "description": "The type of service to create for the NGINX Gateway Fabric.", - "enum": [ - "ClusterIP", - "NodePort", - "LoadBalancer" - ], - "required": [], - "title": "type" - } - }, - "required": [], - "title": "service", - "type": "object" - }, - "serviceAccount": { - "properties": { - "annotations": { - "description": "Set of custom annotations for the NGINX Gateway Fabric service account.", - "required": [], - "title": "annotations", - "type": "object" - }, - "imagePullSecret": { - "default": "", - "description": "The name of the secret containing docker registry credentials.\nSecret must exist in the same namespace as the helm release.", - "required": [], - "title": "imagePullSecret", - "type": "string" - }, - "imagePullSecrets": { - "description": "A list of secret names containing docker registry credentials.\nSecrets must exist in the same namespace as the helm release.", + "topologySpreadConstraints": { + "description": "The topology spread 
constraints for the NGINX Gateway Fabric control plane pod.", "items": { "required": [] }, "required": [], - "title": "imagePullSecrets", + "title": "topologySpreadConstraints", "type": "array" - }, - "name": { - "default": "", - "description": "The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC.", - "required": [], - "title": "name", - "type": "string" } }, - "required": [], - "title": "serviceAccount", + "required": [ + "gatewayClassName", + "gatewayControllerName" + ], + "title": "nginxGateway", "type": "object" - }, - "terminationGracePeriodSeconds": { - "default": 30, - "description": "The termination grace period of the NGINX Gateway Fabric pod.", - "required": [], - "title": "terminationGracePeriodSeconds", - "type": "integer" - }, - "tolerations": { - "description": "Tolerations for the NGINX Gateway Fabric pod.", - "items": { - "required": [] - }, - "required": [], - "title": "tolerations", - "type": "array" - }, - "topologySpreadConstraints": { - "description": "The topology spread constraints for the NGINX Gateway Fabric pod.", - "items": { - "required": [] - }, - "required": [], - "title": "topologySpreadConstraints", - "type": "array" } }, "required": [], diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index c817fb76b7..a42779b1c2 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -1,5 +1,9 @@ # yaml-language-server: $schema=values.schema.json +# -- The DNS cluster domain of your Kubernetes cluster. +clusterDomain: cluster.local + +# -- The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. nginxGateway: # FIXME(lucacome): https://github.com/nginx/nginx-gateway-fabric/issues/2490 @@ -50,8 +54,30 @@ nginxGateway: # -- Set of custom annotations for NginxGateway objects. configAnnotations: {} + # -- The service configuration for the NGINX Gateway Fabric control plane. 
+ service: + # -- The annotations of the NGINX Gateway Fabric control plane service. + annotations: {} + + # -- The serviceaccount configuration for the NGINX Gateway Fabric control plane. + serviceAccount: + # -- Set of custom annotations for the NGINX Gateway Fabric control plane service account. + annotations: {} + + # -- The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC. + # @default -- Autogenerated if not set or set to "" + name: "" + + # -- The name of the secret containing docker registry credentials for the control plane. + # Secret must exist in the same namespace as the helm release. + imagePullSecret: "" + + # -- A list of secret names containing docker registry credentials for the control plane. + # Secrets must exist in the same namespace as the helm release. + imagePullSecrets: [] + # -- The number of replicas of the NGINX Gateway Fabric Deployment. - replicaCount: 1 + replicas: 1 # The configuration for leader election. leaderElection: @@ -66,7 +92,7 @@ nginxGateway: lockName: "" ## Defines the settings for the control plane readiness probe. This probe returns Ready when the controller - ## has started and configured NGINX to serve traffic. + ## has started and is ready to configure NGINX. readinessProbe: # -- Enable the /readyz endpoint on the control plane. enable: true @@ -82,6 +108,7 @@ nginxGateway: # -- The number of seconds after the Pod has started before the readiness probes are initiated. initialDelaySeconds: 3 + # -- The image configuration for the NGINX Gateway Fabric control plane. image: # -- The NGINX Gateway Fabric image to use repository: ghcr.io/nginx/nginx-gateway-fabric @@ -94,10 +121,6 @@ nginxGateway: # @schema pullPolicy: Always - securityContext: - # -- Some environments may need this set to true in order for the control plane to successfully reload NGINX. - allowPrivilegeEscalation: false - productTelemetry: # -- Enable the collection of product telemetry. 
enable: true @@ -108,9 +131,44 @@ nginxGateway: # -- The resource requests and/or limits of the nginx-gateway container. resources: {} + # -- extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with + # nginxGateway.extraVolumeMounts mount additional volumes to the container. + extraVolumes: [] + # -- extraVolumeMounts are the additional volume mounts for the nginx-gateway container. extraVolumeMounts: [] + # -- The termination grace period of the NGINX Gateway Fabric control plane pod. + terminationGracePeriodSeconds: 30 + + # -- Tolerations for the NGINX Gateway Fabric control plane pod. + tolerations: [] + + # -- The nodeSelector of the NGINX Gateway Fabric control plane pod. + nodeSelector: {} + + # -- The affinity of the NGINX Gateway Fabric control plane pod. + affinity: {} + + # -- The topology spread constraints for the NGINX Gateway Fabric control plane pod. + topologySpreadConstraints: [] + + metrics: + # -- Enable exposing metrics in the Prometheus format. + enable: true + + # @schema + # type: integer + # minimum: 1 + # maximum: 65535 + # @schema + # -- Set the port where the Prometheus metrics are exposed. + port: 9113 + + # -- Enable serving metrics via https. By default metrics are served via http. + # Please note that this endpoint will be secured with a self-signed certificate. + secure: false + gwAPIExperimentalFeatures: # -- Enable the experimental features of Gateway API which are supported by NGINX Gateway Fabric. Requires the Gateway # APIs installed from the experimental channel. @@ -121,7 +179,19 @@ nginxGateway: # config for HTTPRoute and GRPCRoute resources. enable: false +# -- The nginx section contains the configuration for all NGINX data plane deployments +# installed by the NGINX Gateway Fabric control plane. nginx: + # @schema + # enum: + # - deployment + # @schema + # -- The kind of NGINX deployment. + kind: deployment + + # -- The number of replicas of the NGINX Deployment. 
+ replicas: 1 + image: # -- The NGINX image to use. repository: ghcr.io/nginx/nginx-gateway-fabric/nginx @@ -134,9 +204,19 @@ nginx: # @schema pullPolicy: Always - # -- Is NGINX Plus image being used + # -- Is NGINX Plus image being used. plus: false + # -- The name of the secret containing docker registry credentials. + # Secret must exist in the same namespace as the helm release. The control + # plane will copy this secret into any namespace where NGINX is deployed. + imagePullSecret: "" + + # -- A list of secret names containing docker registry credentials. + # Secrets must exist in the same namespace as the helm release. The control + # plane will copy these secrets into any namespace where NGINX is deployed. + imagePullSecrets: [] + # Configuration for NGINX Plus usage reporting. usage: # -- The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace @@ -232,6 +312,22 @@ nginx: # pattern: ^([^"$\\]|\\[^$])*$ # minLength: 1 # maxLength: 255 + # disabledFeatures: + # type: array + # items: + # type: string + # enum: + # - DisableTracing + # metrics: + # type: object + # description: Metrics defines the configuration for Prometheus scraping metrics. + # properties: + # disable: + # type: boolean + # port: + # type: integer + # minimum: 1 + # maximum: 65535 # logging: # type: object # description: Logging defines logging related settings for NGINX. @@ -247,6 +343,14 @@ nginx: # - crit # - alert # - emerg + # agentLevel: + # type: string + # enum: + # - debug + # - info + # - error + # - panic + # - fatal # nginxPlus: # type: object # description: NginxPlus specifies NGINX Plus additional settings. @@ -263,128 +367,113 @@ nginx: # value: # type: string # @schema - # -- The configuration for the data plane that is contained in the NginxProxy resource. + # -- The configuration for the data plane that is contained in the NginxProxy resource. 
This is applied globally to all Gateways + # managed by this instance of NGINX Gateway Fabric. config: {} - # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. - debug: false + # -- The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. + pod: {} + # -- The termination grace period of the NGINX data plane pod. + # terminationGracePeriodSeconds: 30 - # -- The lifecycle of the nginx container. - lifecycle: {} + # -- Tolerations for the NGINX data plane pod. + # tolerations: [] - # -- extraVolumeMounts are the additional volume mounts for the nginx container. - extraVolumeMounts: [] + # -- The nodeSelector of the NGINX data plane pod. + # nodeSelector: {} -# -- The termination grace period of the NGINX Gateway Fabric pod. -terminationGracePeriodSeconds: 30 + # -- The affinity of the NGINX data plane pod. + # affinity: {} -# -- Tolerations for the NGINX Gateway Fabric pod. -tolerations: [] + # -- The topology spread constraints for the NGINX data plane pod. + # topologySpreadConstraints: [] -# -- The nodeSelector of the NGINX Gateway Fabric pod. -nodeSelector: {} + # -- extraVolumes for the NGINX data plane pod. Use in conjunction with + # nginx.container.extraVolumeMounts mount additional volumes to the container. + # extraVolumes: [] -# -- The affinity of the NGINX Gateway Fabric pod. -affinity: {} + # -- The container configuration for the NGINX container. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. + container: {} + # -- The resource requirements of the NGINX container. + # resources: {} -# -- The topology spread constraints for the NGINX Gateway Fabric pod. -topologySpreadConstraints: [] + # -- The lifecycle of the NGINX container. 
+ # lifecycle: {} -serviceAccount: - # -- Set of custom annotations for the NGINX Gateway Fabric service account. - annotations: {} + # -- extraVolumeMounts are the additional volume mounts for the NGINX container. + # extraVolumeMounts: [] - # -- The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC. - # @default -- Autogenerated if not set or set to "" - name: "" - - # -- The name of the secret containing docker registry credentials. - # Secret must exist in the same namespace as the helm release. - imagePullSecret: "" - - # -- A list of secret names containing docker registry credentials. - # Secrets must exist in the same namespace as the helm release. - imagePullSecrets: [] + # -- The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. + service: + # @schema + # enum: + # - ClusterIP + # - NodePort + # - LoadBalancer + # @schema + # -- The type of service to create for the NGINX data plane. + type: LoadBalancer -service: - # -- Creates a service to expose the NGINX Gateway Fabric pods. - create: true + # @schema + # enum: + # - Cluster + # - Local + # @schema + # -- The externalTrafficPolicy of the service. The value Local preserves the client source IP. + externalTrafficPolicy: Local - # @schema - # enum: - # - ClusterIP - # - NodePort - # - LoadBalancer - # @schema - # -- The type of service to create for the NGINX Gateway Fabric. - type: LoadBalancer + # -- The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. + loadBalancerIP: "" - # @schema - # enum: - # - Cluster - # - Local - # @schema - # -- The externalTrafficPolicy of the service. The value Local preserves the client source IP. - externalTrafficPolicy: Local + # -- LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + # Requires nginx.service.type set to LoadBalancer. 
+ loadBalancerClass: "" - # -- The annotations of the NGINX Gateway Fabric service. - annotations: {} + # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. + loadBalancerSourceRanges: [] - # -- The static IP address for the load balancer. Requires service.type set to LoadBalancer. - loadBalancerIP: "" + # @schema + # type: array + # items: + # type: object + # properties: + # port: + # type: integer + # required: true + # minimum: 1 + # maximum: 65535 + # listenerPort: + # type: integer + # required: true + # minimum: 1 + # maximum: 65535 + # @schema + # -- A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port, + # otherwise it will be ignored. The default NodePort range enforced by Kubernetes is 30000-32767. + nodePorts: [] + # - port: 30025 + # listenerPort: 80 - # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires service.type set to LoadBalancer. - loadBalancerSourceRanges: [] + # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. + debug: false - # @schema - # type: array - # items: - # type: object - # properties: - # port: - # type: integer - # minimum: 1 - # maximum: 65535 - # targetPort: - # type: integer - # minimum: 1 - # maximum: 65535 - # protocol: - # type: string - # enum: - # - TCP - # - UDP - # name: - # type: string - # @schema - # -- A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from - # your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - -metrics: - # -- Enable exposing metrics in the Prometheus format. 
- enable: true +# -- The certGenerator section contains the configuration for the cert-generator Job. +certGenerator: + # -- The annotations of the cert-generator Job. + annotations: {} - # @schema - # type: integer - # minimum: 1 - # maximum: 65535 - # @schema - # -- Set the port where the Prometheus metrics are exposed. - port: 9113 + # -- The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane + # to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric + # control plane is running in (default namespace: nginx-gateway). + serverTLSSecretName: server-tls - # -- Enable serving metrics via https. By default metrics are served via http. - # Please note that this endpoint will be secured with a self-signed certificate. - secure: false + # -- The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely + # communicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the + # NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). + agentTLSSecretName: agent-tls -# -- extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with -# nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. -extraVolumes: [] + # -- Overwrite existing TLS Secrets on startup. 
const (
	// expiry is the validity period for all generated certificates.
	expiry = 365 * 3 * 24 * time.Hour // 3 years

	// defaultDomain is the default DNS cluster domain, used to build the
	// client certificate's wildcard DNS name when no domain is supplied.
	defaultDomain = "cluster.local"
)

// subject is the distinguished name stamped onto the CA, server, and client certificates.
var subject = pkix.Name{
	CommonName:         "nginx-gateway",
	Country:            []string{"US"},
	Locality:           []string{"SEA"},
	Organization:       []string{"F5"},
	OrganizationalUnit: []string{"NGINX"},
}

// certificateConfig holds the PEM-encoded certificates and keys produced by generateCertificates.
type certificateConfig struct {
	caCertificate     []byte
	serverCertificate []byte
	serverKey         []byte
	clientCertificate []byte
	clientKey         []byte
}

// generateCertificates creates a CA, server, and client certificates and keys.
// The server certificate is valid for "<service>.<namespace>.svc" and the client
// certificate for "*.<clientDNSDomain>". All material is returned PEM-encoded.
func generateCertificates(service, namespace, clientDNSDomain string) (*certificateConfig, error) {
	caCertPEM, caKeyPEM, err := generateCA()
	if err != nil {
		return nil, fmt.Errorf("error generating CA: %w", err)
	}

	caKeyPair, err := tls.X509KeyPair(caCertPEM, caKeyPEM)
	if err != nil {
		return nil, err
	}

	serverCert, serverKey, err := generateCert(caKeyPair, serverDNSNames(service, namespace))
	if err != nil {
		return nil, fmt.Errorf("error generating server cert: %w", err)
	}

	clientCert, clientKey, err := generateCert(caKeyPair, clientDNSNames(clientDNSDomain))
	if err != nil {
		return nil, fmt.Errorf("error generating client cert: %w", err)
	}

	return &certificateConfig{
		caCertificate:     caCertPEM,
		serverCertificate: serverCert,
		serverKey:         serverKey,
		clientCertificate: clientCert,
		clientKey:         clientKey,
	}, nil
}

// newSerialNumber returns a random 128-bit certificate serial number.
// x509.CreateCertificate requires SerialNumber to be non-nil on Go versions
// before 1.25 (it returns "x509: no SerialNumber given" otherwise), and a
// random serial also avoids collisions between repeated runs.
func newSerialNumber() (*big.Int, error) {
	limit := new(big.Int).Lsh(big.NewInt(1), 128)
	return rand.Int(rand.Reader, limit)
}

// generateCA creates a self-signed CA and returns its PEM-encoded
// certificate and RSA private key.
func generateCA() ([]byte, []byte, error) {
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}

	serial, err := newSerialNumber()
	if err != nil {
		return nil, nil, err
	}

	ca := &x509.Certificate{
		SerialNumber:          serial,
		Subject:               subject,
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(expiry),
		SubjectKeyId:          subjectKeyID(caKey.N),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		IsCA:                  true,
		BasicConstraintsValid: true,
	}

	caCertBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey)
	if err != nil {
		return nil, nil, err
	}

	caCertPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: caCertBytes,
	})

	caKeyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(caKey),
	})

	return caCertPEM, caKeyPEM, nil
}

// generateCert creates a certificate signed by the given CA key pair, valid for
// the provided DNS names, and returns the PEM-encoded certificate and key.
func generateCert(caKeyPair tls.Certificate, dnsNames []string) ([]byte, []byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}

	serial, err := newSerialNumber()
	if err != nil {
		return nil, nil, err
	}

	cert := &x509.Certificate{
		SerialNumber: serial,
		Subject:      subject,
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(expiry),
		SubjectKeyId: subjectKeyID(key.N),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		DNSNames:     dnsNames,
	}

	// The CA keypair came from tls.X509KeyPair, so Certificate[0] is the
	// DER-encoded CA certificate used as the signing parent.
	caCert, err := x509.ParseCertificate(caKeyPair.Certificate[0])
	if err != nil {
		return nil, nil, err
	}

	certBytes, err := x509.CreateCertificate(rand.Reader, cert, caCert, &key.PublicKey, caKeyPair.PrivateKey)
	if err != nil {
		return nil, nil, err
	}

	certPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: certBytes,
	})

	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	return certPEM, keyPEM, nil
}

// subjectKeyID generates the SubjectKeyID using the modulus of the private key.
func subjectKeyID(n *big.Int) []byte {
	h := sha1.New() //nolint:gosec // using sha1 in this case is fine
	h.Write(n.Bytes())
	return h.Sum(nil)
}

// serverDNSNames returns the DNS name the server certificate must cover:
// the in-cluster Service address "<service>.<namespace>.svc".
func serverDNSNames(service, namespace string) []string {
	return []string{
		fmt.Sprintf("%s.%s.svc", service, namespace),
	}
}

// clientDNSNames returns the wildcard DNS name the client certificate must
// cover for the given cluster DNS domain.
func clientDNSNames(dnsDomain string) []string {
	return []string{
		fmt.Sprintf("*.%s", dnsDomain),
	}
}
ctlrZap.New().WithName("cert-generator") + for _, secret := range []corev1.Secret{serverSecret, clientSecret} { + key := client.ObjectKeyFromObject(&secret) + currentSecret := &corev1.Secret{} + + if err := k8sClient.Get(ctx, key, currentSecret); err != nil { + if apierrors.IsNotFound(err) { + if err := k8sClient.Create(ctx, &secret); err != nil { + return fmt.Errorf("error creating secret %v: %w", key, err) + } + } else { + return fmt.Errorf("error getting secret %v: %w", key, err) + } + } else { + if !overwrite { + logger.Info("Skipping updating Secret. Must be updated manually or by another source.", "name", key) + continue + } + + if !reflect.DeepEqual(secret.Data, currentSecret.Data) { + if err := k8sClient.Update(ctx, &secret); err != nil { + return fmt.Errorf("error updating secret %v: %w", key, err) + } + } + } + } + + return nil +} diff --git a/cmd/gateway/certs_test.go b/cmd/gateway/certs_test.go new file mode 100644 index 0000000000..4a9bfbe164 --- /dev/null +++ b/cmd/gateway/certs_test.go @@ -0,0 +1,139 @@ +package main + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestGenerateCertificates(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + certConfig, err := generateCertificates("nginx", "default", "cluster.local") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(certConfig).ToNot(BeNil()) + g.Expect(certConfig.caCertificate).ToNot(BeNil()) + g.Expect(certConfig.serverCertificate).ToNot(BeNil()) + g.Expect(certConfig.serverKey).ToNot(BeNil()) + g.Expect(certConfig.clientCertificate).ToNot(BeNil()) + g.Expect(certConfig.clientKey).ToNot(BeNil()) + + block, _ := pem.Decode(certConfig.caCertificate) + g.Expect(block).ToNot(BeNil()) + caCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(caCert.IsCA).To(BeTrue()) + + pool := x509.NewCertPool() + g.Expect(pool.AppendCertsFromPEM(certConfig.caCertificate)).To(BeTrue()) + + block, _ = pem.Decode(certConfig.serverCertificate) + g.Expect(block).ToNot(BeNil()) + serverCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + + _, err = serverCert.Verify(x509.VerifyOptions{ + DNSName: "nginx.default.svc", + Roots: pool, + }) + g.Expect(err).ToNot(HaveOccurred()) + + block, _ = pem.Decode(certConfig.clientCertificate) + g.Expect(block).ToNot(BeNil()) + clientCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + + _, err = clientCert.Verify(x509.VerifyOptions{ + DNSName: "*.cluster.local", + Roots: pool, + }) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCreateSecrets(t *testing.T) { + t.Parallel() + + fakeClient := fake.NewFakeClient() + + tests := []struct { + name string + overwrite bool + }{ + { + name: "doesn't overwrite on updates", + overwrite: false, + }, + { + name: "overwrites on updates", + overwrite: true, + }, + } + + verifySecrets := func(g *WithT, name string, overwrite bool) { + certConfig, err 
:= generateCertificates("nginx", "default", "cluster.local") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(certConfig).ToNot(BeNil()) + + serverSecretName := fmt.Sprintf("%s-server-secret", name) + clientSecretName := fmt.Sprintf("%s-client-secret", name) + err = createSecrets(t.Context(), fakeClient, certConfig, serverSecretName, clientSecretName, "default", overwrite) + g.Expect(err).ToNot(HaveOccurred()) + + serverSecret := &corev1.Secret{} + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: serverSecretName, Namespace: "default"}, serverSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(serverSecret.Data["ca.crt"]).To(Equal(certConfig.caCertificate)) + g.Expect(serverSecret.Data["tls.crt"]).To(Equal(certConfig.serverCertificate)) + g.Expect(serverSecret.Data["tls.key"]).To(Equal(certConfig.serverKey)) + + clientSecret := &corev1.Secret{} + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: clientSecretName, Namespace: "default"}, clientSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(clientSecret.Data["ca.crt"]).To(Equal(certConfig.caCertificate)) + g.Expect(clientSecret.Data["tls.crt"]).To(Equal(certConfig.clientCertificate)) + g.Expect(clientSecret.Data["tls.key"]).To(Equal(certConfig.clientKey)) + + // If overwrite is false, then no updates should occur. If true, then updates should occur. 
+ newCertConfig, err := generateCertificates("nginx", "default", "new-DNS-name") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(newCertConfig).ToNot(BeNil()) + g.Expect(newCertConfig).ToNot(Equal(certConfig)) + + err = createSecrets(t.Context(), fakeClient, newCertConfig, serverSecretName, clientSecretName, "default", overwrite) + g.Expect(err).ToNot(HaveOccurred()) + + expCertConfig := certConfig + if overwrite { + expCertConfig = newCertConfig + } + + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: serverSecretName, Namespace: "default"}, serverSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(serverSecret.Data["tls.crt"]).To(Equal(expCertConfig.serverCertificate)) + + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: clientSecretName, Namespace: "default"}, clientSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(clientSecret.Data["tls.crt"]).To(Equal(expCertConfig.clientCertificate)) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + name := "no-overwrite" + if test.overwrite { + name = "overwrite" + } + + verifySecrets(g, name, test.overwrite) + }) + } +} diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index ed2b473de1..be076a76da 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -6,25 +6,25 @@ import ( "os" "runtime/debug" "strconv" + "strings" "time" "github.com/spf13/cobra" "github.com/spf13/pflag" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" ctlr "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + k8sConfig "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/log" ctlrZap "sigs.k8s.io/controller-runtime/pkg/log/zap" - "github.com/nginx/nginx-gateway-fabric/internal/mode/provisioner" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" 
"github.com/nginx/nginx-gateway-fabric/internal/mode/static" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" ngxConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" ) // These flags are shared by multiple commands. @@ -37,6 +37,9 @@ const ( gatewayCtlrNameUsageFmt = `The name of the Gateway controller. ` + `The controller name must be of the form: DOMAIN/PATH. The controller's domain is '%s'` plusFlag = "nginx-plus" + + serverTLSSecret = "server-tls" + agentTLSSecret = "agent-tls" ) func createRootCommand() *cobra.Command { @@ -52,13 +55,12 @@ func createRootCommand() *cobra.Command { return rootCmd } -func createStaticModeCommand() *cobra.Command { +func createControllerCommand() *cobra.Command { // flag names const ( - gatewayFlag = "gateway" configFlag = "config" serviceFlag = "service" - updateGCStatusFlag = "update-gatewayclass-status" + agentTLSSecretFlag = "agent-tls-secret" metricsDisableFlag = "metrics-disable" metricsSecureFlag = "metrics-secure-serving" metricsPortFlag = "metrics-port" @@ -68,6 +70,7 @@ func createStaticModeCommand() *cobra.Command { leaderElectionLockNameFlag = "leader-election-lock-name" productTelemetryDisableFlag = "product-telemetry-disable" gwAPIExperimentalFlag = "gateway-api-experimental-features" + nginxDockerSecretFlag = "nginx-docker-secret" //nolint:gosec // not credentials usageReportSecretFlag = "usage-report-secret" usageReportEndpointFlag = "usage-report-endpoint" usageReportResolverFlag = "usage-report-resolver" @@ -75,6 +78,7 @@ func createStaticModeCommand() *cobra.Command { usageReportClientSSLSecretFlag = "usage-report-client-ssl-secret" //nolint:gosec // not credentials usageReportCASecretFlag = "usage-report-ca-secret" //nolint:gosec // not credentials snippetsFiltersFlag = "snippets-filters" + nginxSCCFlag = "nginx-scc" ) // 
flag values @@ -87,14 +91,19 @@ func createStaticModeCommand() *cobra.Command { validator: validateResourceName, } - updateGCStatus bool - gateway = namespacedNameValue{} - configName = stringValidatingValue{ + configName = stringValidatingValue{ validator: validateResourceName, } serviceName = stringValidatingValue{ validator: validateResourceName, } + agentTLSSecretName = stringValidatingValue{ + validator: validateResourceName, + value: agentTLSSecret, + } + nginxSCCName = stringValidatingValue{ + validator: validateResourceName, + } disableMetrics bool metricsSecure bool metricsListenPort = intValidatingValue{ @@ -119,7 +128,10 @@ func createStaticModeCommand() *cobra.Command { snippetsFilters bool - plus bool + plus bool + nginxDockerSecrets = stringSliceValidatingValue{ + validator: validateResourceName, + } usageReportSkipVerify bool usageReportSecretName = stringValidatingValue{ validator: validateResourceName, @@ -140,8 +152,8 @@ func createStaticModeCommand() *cobra.Command { ) cmd := &cobra.Command{ - Use: "static-mode", - Short: "Configure NGINX in the scope of a single Gateway resource", + Use: "controller", + Short: "Run the NGINX Gateway Fabric control plane", RunE: func(cmd *cobra.Command, _ []string) error { atom := zap.NewAtomicLevel() @@ -150,7 +162,7 @@ func createStaticModeCommand() *cobra.Command { commit, date, dirty := getBuildInfo() logger.Info( - "Starting NGINX Gateway Fabric in static mode", + "Starting the NGINX Gateway Fabric control plane", "version", version, "commit", commit, "date", date, @@ -183,11 +195,6 @@ func createStaticModeCommand() *cobra.Command { return fmt.Errorf("error parsing telemetry endpoint insecure: %w", err) } - var gwNsName *types.NamespacedName - if cmd.Flags().Changed(gatewayFlag) { - gwNsName = &gateway.value - } - var usageReportConfig config.UsageReportConfig if plus && usageReportSecretName.value == "" { return errors.New("usage-report-secret is required when using NGINX Plus") @@ -206,20 +213,18 @@ func 
createStaticModeCommand() *cobra.Command { flagKeys, flagValues := parseFlags(cmd.Flags()) - podConfig, err := createGatewayPodConfig(serviceName.value) + podConfig, err := createGatewayPodConfig(version, serviceName.value) if err != nil { return fmt.Errorf("error creating gateway pod config: %w", err) } conf := config.Config{ - GatewayCtlrName: gatewayCtlrName.value, - ConfigName: configName.String(), - Logger: logger, - AtomicLevel: atom, - GatewayClassName: gatewayClassName.value, - GatewayNsName: gwNsName, - UpdateGatewayClassStatus: updateGCStatus, - GatewayPodConfig: podConfig, + GatewayCtlrName: gatewayCtlrName.value, + ConfigName: configName.String(), + Logger: logger, + AtomicLevel: atom, + GatewayClassName: gatewayClassName.value, + GatewayPodConfig: podConfig, HealthConfig: config.HealthConfig{ Enabled: !disableHealth, Port: healthListenPort.value, @@ -242,14 +247,16 @@ func createStaticModeCommand() *cobra.Command { EndpointInsecure: telemetryEndpointInsecure, }, Plus: plus, - Version: version, ExperimentalFeatures: gwExperimentalFeatures, ImageSource: imageSource, Flags: config.Flags{ Names: flagKeys, Values: flagValues, }, - SnippetsFilters: snippetsFilters, + SnippetsFilters: snippetsFilters, + NginxDockerSecretNames: nginxDockerSecrets.values, + AgentTLSSecretName: agentTLSSecretName.value, + NGINXSCCName: nginxSCCName.value, } if err := static.StartManager(conf); err != nil { @@ -274,16 +281,6 @@ func createStaticModeCommand() *cobra.Command { ) utilruntime.Must(cmd.MarkFlagRequired(gatewayClassFlag)) - cmd.Flags().Var( - &gateway, - gatewayFlag, - "The namespaced name of the Gateway resource to use. "+ - "Must be of the form: NAMESPACE/NAME. "+ - "If not specified, the control plane will process all Gateways for the configured GatewayClass. "+ - "However, among them, it will choose the oldest resource by creation timestamp. 
If the timestamps are "+ - "equal, it will choose the resource that appears first in alphabetical order by {namespace}/{name}.", - ) - cmd.Flags().VarP( &configName, configFlag, @@ -299,11 +296,12 @@ func createStaticModeCommand() *cobra.Command { ` Lives in the same Namespace as the controller.`, ) - cmd.Flags().BoolVar( - &updateGCStatus, - updateGCStatusFlag, - true, - "Update the status of the GatewayClass resource.", + cmd.Flags().Var( + &agentTLSSecretName, + agentTLSSecretFlag, + `The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely `+ + `communicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, ) cmd.Flags().BoolVar( @@ -378,6 +376,13 @@ func createStaticModeCommand() *cobra.Command { "Requires the Gateway APIs installed from the experimental channel.", ) + cmd.Flags().Var( + &nginxDockerSecrets, + nginxDockerSecretFlag, + "The name of the NGINX docker registry Secret(s). 
Must exist in the same namespace "+ + "that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).", + ) + cmd.Flags().Var( &usageReportSecretName, usageReportSecretFlag, @@ -428,85 +433,115 @@ func createStaticModeCommand() *cobra.Command { "generated NGINX config for HTTPRoute and GRPCRoute resources.", ) + cmd.Flags().Var( + &nginxSCCName, + nginxSCCFlag, + `The name of the SecurityContextConstraints to be used with the NGINX data plane Pods.`+ + ` Only applicable in OpenShift.`, + ) + return cmd } -func createProvisionerModeCommand() *cobra.Command { +func createGenerateCertsCommand() *cobra.Command { + // flag names + const ( + serverTLSSecretFlag = "server-tls-secret" //nolint:gosec // not credentials + agentTLSSecretFlag = "agent-tls-secret" + serviceFlag = "service" + clusterDomainFlag = "cluster-domain" + overwriteFlag = "overwrite" + ) + + // flag values var ( - gatewayCtlrName = stringValidatingValue{ - validator: validateGatewayControllerName, + serverTLSSecretName = stringValidatingValue{ + validator: validateResourceName, + value: serverTLSSecret, } - gatewayClassName = stringValidatingValue{ + agentTLSSecretName = stringValidatingValue{ validator: validateResourceName, + value: agentTLSSecret, } + serviceName = stringValidatingValue{ + validator: validateResourceName, + } + clusterDomain = stringValidatingValue{ + validator: validateQualifiedName, + value: defaultDomain, + } + overwrite bool ) cmd := &cobra.Command{ - Use: "provisioner-mode", - Short: "Provision a static-mode NGINX Gateway Fabric Deployment per Gateway resource", - Hidden: true, - RunE: func(_ *cobra.Command, _ []string) error { - logger := ctlrZap.New() - commit, date, dirty := getBuildInfo() - logger.Info( - "Starting NGINX Gateway Fabric Provisioner", - "version", version, - "commit", commit, - "date", date, - "dirty", dirty, - ) + Use: "generate-certs", + Short: "Generate self-signed certificates for securing control plane to data plane 
communication", + RunE: func(cmd *cobra.Command, _ []string) error { + namespace, err := getValueFromEnv("POD_NAMESPACE") + if err != nil { + return fmt.Errorf("POD_NAMESPACE must be specified in the ENV") + } - return provisioner.StartManager(provisioner.Config{ - Logger: logger, - GatewayClassName: gatewayClassName.value, - GatewayCtlrName: gatewayCtlrName.value, - }) + certConfig, err := generateCertificates(serviceName.value, namespace, clusterDomain.value) + if err != nil { + return fmt.Errorf("error generating certificates: %w", err) + } + + k8sClient, err := client.New(k8sConfig.GetConfigOrDie(), client.Options{}) + if err != nil { + return fmt.Errorf("error creating k8s client: %w", err) + } + + if err := createSecrets( + cmd.Context(), + k8sClient, + certConfig, + serverTLSSecretName.value, + agentTLSSecretName.value, + namespace, + overwrite, + ); err != nil { + return fmt.Errorf("error creating secrets: %w", err) + } + + return nil }, } cmd.Flags().Var( - &gatewayCtlrName, - gatewayCtlrNameFlag, - fmt.Sprintf(gatewayCtlrNameUsageFmt, domain), + &serverTLSSecretName, + serverTLSSecretFlag, + `The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane `+ + `to securely communicate with the NGINX Agent. Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, ) - utilruntime.Must(cmd.MarkFlagRequired(gatewayCtlrNameFlag)) cmd.Flags().Var( - &gatewayClassName, - gatewayClassFlag, - gatewayClassNameUsage, + &agentTLSSecretName, + agentTLSSecretFlag, + `The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely `+ + `communicate with the NGINX Gateway Fabric control plane. 
Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, ) - utilruntime.Must(cmd.MarkFlagRequired(gatewayClassFlag)) - - return cmd -} -// FIXME(pleshakov): Remove this command once NGF min supported Kubernetes version supports sleep action in -// preStop hook. -// See https://github.com/kubernetes/enhancements/tree/4ec371d92dcd4f56a2ab18c8ba20bb85d8d20efe/keps/sig-node/3960-pod-lifecycle-sleep-action -// -//nolint:lll -func createSleepCommand() *cobra.Command { - // flag names - const durationFlag = "duration" - // flag values - var duration time.Duration + cmd.Flags().Var( + &serviceName, + serviceFlag, + `The name of the Service that fronts the NGINX Gateway Fabric Pod.`+ + ` Lives in the same Namespace as the controller.`, + ) - cmd := &cobra.Command{ - Use: "sleep", - Short: "Sleep for specified duration and exit", - Run: func(_ *cobra.Command, _ []string) { - // It is expected that this command is run from lifecycle hook. - // Because logs from hooks are not visible in the container logs, we don't log here at all. - time.Sleep(duration) - }, - } + cmd.Flags().Var( + &clusterDomain, + clusterDomainFlag, + `The DNS domain of your Kubernetes cluster.`, + ) - cmd.Flags().DurationVar( - &duration, - durationFlag, - 30*time.Second, - "Set the duration of sleep. 
Must be parsable by https://pkg.go.dev/time#ParseDuration", + cmd.Flags().BoolVar( + &overwrite, + overwriteFlag, + false, + "Overwrite existing certificates.", ) return cmd @@ -519,14 +554,14 @@ func createInitializeCommand() *cobra.Command { // flag values var srcFiles []string - var dest string + var destDirs []string var plus bool cmd := &cobra.Command{ Use: "initialize", Short: "Write initial configuration files", RunE: func(_ *cobra.Command, _ []string) error { - if err := validateCopyArgs(srcFiles, dest); err != nil { + if err := validateCopyArgs(srcFiles, destDirs); err != nil { return err } @@ -546,7 +581,7 @@ func createInitializeCommand() *cobra.Command { logger.Info( "Starting init container", "source filenames to copy", srcFiles, - "destination directory", dest, + "destination directories", destDirs, "nginx-plus", plus, ) @@ -558,16 +593,21 @@ func createInitializeCommand() *cobra.Command { Logger: logger.WithName("deployCtxCollector"), }) + files := make([]fileToCopy, 0, len(srcFiles)) + for i, src := range srcFiles { + files = append(files, fileToCopy{ + destDirName: destDirs[i], + srcFileName: src, + }) + } + return initialize(initializeConfig{ fileManager: file.NewStdLibOSFileManager(), fileGenerator: ngxConfig.NewGeneratorImpl(plus, nil, logger.WithName("generator")), logger: logger, plus: plus, collector: dcc, - copy: copyFiles{ - srcFileNames: srcFiles, - destDirName: dest, - }, + copy: files, }) }, } @@ -579,11 +619,11 @@ func createInitializeCommand() *cobra.Command { "The source files to be copied", ) - cmd.Flags().StringVar( - &dest, + cmd.Flags().StringSliceVar( + &destDirs, destFlag, - "", - "The destination directory for the source files to be copied to", + []string{}, + "The destination directories for the source files at the same array index to be copied to", ) cmd.Flags().BoolVar( @@ -598,6 +638,37 @@ func createInitializeCommand() *cobra.Command { return cmd } +// FIXME(pleshakov): Remove this command once NGF min supported Kubernetes 
version supports sleep action in +// preStop hook. +// See https://github.com/kubernetes/enhancements/tree/4ec371d92dcd4f56a2ab18c8ba20bb85d8d20efe/keps/sig-node/3960-pod-lifecycle-sleep-action +// +//nolint:lll +func createSleepCommand() *cobra.Command { + // flag names + const durationFlag = "duration" + // flag values + var duration time.Duration + + cmd := &cobra.Command{ + Use: "sleep", + Short: "Sleep for specified duration and exit", + Run: func(_ *cobra.Command, _ []string) { + // It is expected that this command is run from lifecycle hook. + // Because logs from hooks are not visible in the container logs, we don't log here at all. + time.Sleep(duration) + }, + } + + cmd.Flags().DurationVar( + &duration, + durationFlag, + 30*time.Second, + "Set the duration of sleep. Must be parsable by https://pkg.go.dev/time#ParseDuration", + ) + + return cmd +} + func parseFlags(flags *pflag.FlagSet) ([]string, []string) { var flagKeys, flagValues []string @@ -644,33 +715,46 @@ func getBuildInfo() (commitHash string, commitTime string, dirtyBuild string) { return } -func createGatewayPodConfig(svcName string) (config.GatewayPodConfig, error) { - podIP, err := getValueFromEnv("POD_IP") +func createGatewayPodConfig(version, svcName string) (config.GatewayPodConfig, error) { + podUID, err := getValueFromEnv("POD_UID") if err != nil { return config.GatewayPodConfig{}, err } - podUID, err := getValueFromEnv("POD_UID") + ns, err := getValueFromEnv("POD_NAMESPACE") if err != nil { return config.GatewayPodConfig{}, err } - ns, err := getValueFromEnv("POD_NAMESPACE") + name, err := getValueFromEnv("POD_NAME") if err != nil { return config.GatewayPodConfig{}, err } - name, err := getValueFromEnv("POD_NAME") + instance, err := getValueFromEnv("INSTANCE_NAME") if err != nil { return config.GatewayPodConfig{}, err } + image, err := getValueFromEnv("IMAGE_NAME") + if err != nil { + return config.GatewayPodConfig{}, err + } + + // use image tag version if set, otherwise fall back to 
binary version + ngfVersion := version + if imageParts := strings.Split(image, ":"); len(imageParts) == 2 { + ngfVersion = imageParts[1] + } + c := config.GatewayPodConfig{ - PodIP: podIP, - ServiceName: svcName, - Namespace: ns, - Name: name, - UID: podUID, + ServiceName: svcName, + Namespace: ns, + Name: name, + UID: podUID, + InstanceName: instance, + Version: ngfVersion, + Image: image, } return c, nil diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index 2c1ac5d266..8db899f1cb 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -9,7 +9,6 @@ import ( . "github.com/onsi/gomega" "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/types" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) @@ -122,18 +121,14 @@ func TestCommonFlagsValidation(t *testing.T) { } for _, test := range tests { - t.Run(test.name+"_static_mode", func(t *testing.T) { + t.Run(test.name+"_controller", func(t *testing.T) { t.Parallel() - testFlag(t, createStaticModeCommand(), test) - }) - t.Run(test.name+"_provisioner_mode", func(t *testing.T) { - t.Parallel() - testFlag(t, createProvisionerModeCommand(), test) + testFlag(t, createControllerCommand(), test) }) } } -func TestStaticModeCmdFlagValidation(t *testing.T) { +func TestControllerCmdFlagValidation(t *testing.T) { t.Parallel() tests := []flagTestCase{ { @@ -141,10 +136,9 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { args: []string{ "--gateway-ctlr-name=gateway.nginx.org/nginx-gateway", // common and required flag "--gatewayclass=nginx", // common and required flag - "--gateway=nginx-gateway/nginx", "--config=nginx-gateway-config", "--service=nginx-gateway", - "--update-gatewayclass-status=true", + "--agent-tls-secret=agent-tls", "--metrics-port=9114", "--metrics-disable", "--metrics-secure-serving", @@ -153,12 +147,15 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { "--leader-election-lock-name=my-lock", 
"--leader-election-disable=false", "--nginx-plus", + "--nginx-docker-secret=secret1", + "--nginx-docker-secret=secret2", "--usage-report-secret=my-secret", "--usage-report-endpoint=example.com", "--usage-report-resolver=resolver.com", "--usage-report-ca-secret=ca-secret", "--usage-report-client-ssl-secret=client-secret", "--snippets-filters", + "--nginx-scc=nginx-sscc-name", }, wantErr: false, }, @@ -170,23 +167,6 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { }, wantErr: false, }, - { - name: "gateway is set to empty string", - args: []string{ - "--gateway=", - }, - wantErr: true, - expectedErrPrefix: `invalid argument "" for "--gateway" flag: must be set`, - }, - { - name: "gateway is invalid", - args: []string{ - "--gateway=nginx-gateway", // no namespace - }, - wantErr: true, - expectedErrPrefix: `invalid argument "nginx-gateway" for "--gateway" flag: invalid format; ` + - "must be NAMESPACE/NAME", - }, { name: "config is set to empty string", args: []string{ @@ -220,20 +200,20 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { expectedErrPrefix: `invalid argument "!@#$" for "--service" flag: invalid format`, }, { - name: "update-gatewayclass-status is set to empty string", + name: "agent-tls-secret is set to empty string", args: []string{ - "--update-gatewayclass-status=", + "--agent-tls-secret=", }, wantErr: true, - expectedErrPrefix: `invalid argument "" for "--update-gatewayclass-status" flag: strconv.ParseBool`, + expectedErrPrefix: `invalid argument "" for "--agent-tls-secret" flag: must be set`, }, { - name: "update-gatewayclass-status is invalid", + name: "agent-tls-secret is set to invalid string", args: []string{ - "--update-gatewayclass-status=invalid", // not a boolean + "--agent-tls-secret=!@#$", }, wantErr: true, - expectedErrPrefix: `invalid argument "invalid" for "--update-gatewayclass-status" flag: strconv.ParseBool`, + expectedErrPrefix: `invalid argument "!@#$" for "--agent-tls-secret" flag: invalid format`, }, { name: 
"metrics-port is invalid type", @@ -314,6 +294,31 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { wantErr: true, expectedErrPrefix: `invalid argument "" for "--leader-election-disable" flag: strconv.ParseBool`, }, + { + name: "nginx-docker-secret is set to empty string", + args: []string{ + "--nginx-docker-secret=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--nginx-docker-secret" flag: must be set`, + }, + { + name: "nginx-docker-secret is invalid", + args: []string{ + "--nginx-docker-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-docker-secret" flag: invalid format: `, + }, + { + name: "one nginx-docker-secret is invalid", + args: []string{ + "--nginx-docker-secret=valid", + "--nginx-docker-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-docker-secret" flag: invalid format: `, + }, { name: "usage-report-secret is set to empty string", args: []string{ @@ -405,6 +410,22 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { }, wantErr: true, }, + { + name: "nginx-scc is set to empty string", + args: []string{ + "--nginx-scc=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--nginx-scc" flag: must be set`, + }, + { + name: "nginx-scc is invalid", + args: []string{ + "--nginx-scc=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-scc" flag: invalid format: `, + }, } // common flags validation is tested separately @@ -412,35 +433,24 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() - cmd := createStaticModeCommand() + cmd := createControllerCommand() testFlag(t, cmd, test) }) } } -func TestProvisionerModeCmdFlagValidation(t *testing.T) { +func TestGenerateCertsCmdFlagValidation(t *testing.T) { t.Parallel() - testCase := flagTestCase{ - name: "valid flags", - args: []string{ - 
"--gateway-ctlr-name=gateway.nginx.org/nginx-gateway", // common and required flag - "--gatewayclass=nginx", // common and required flag - }, - wantErr: false, - } - - // common flags validation is tested separately - - testFlag(t, createProvisionerModeCommand(), testCase) -} -func TestSleepCmdFlagValidation(t *testing.T) { - t.Parallel() tests := []flagTestCase{ { name: "valid flags", args: []string{ - "--duration=1s", + "--server-tls-secret=server-secret", + "--agent-tls-secret=agent-secret", + "--service=my-service", + "--cluster-domain=cluster.local", + "--overwrite", }, wantErr: false, }, @@ -450,27 +460,75 @@ func TestSleepCmdFlagValidation(t *testing.T) { wantErr: false, }, { - name: "duration is set to empty string", + name: "server-tls-secret is set to empty string", args: []string{ - "--duration=", + "--server-tls-secret=", }, wantErr: true, - expectedErrPrefix: `invalid argument "" for "--duration" flag: time: invalid duration ""`, + expectedErrPrefix: `invalid argument "" for "--server-tls-secret" flag: must be set`, }, { - name: "duration is invalid", + name: "server-tls-secret is invalid", args: []string{ - "--duration=invalid", + "--server-tls-secret=!@#$", }, wantErr: true, - expectedErrPrefix: `invalid argument "invalid" for "--duration" flag: time: invalid duration "invalid"`, + expectedErrPrefix: `invalid argument "!@#$" for "--server-tls-secret" flag: invalid format`, + }, + { + name: "agent-tls-secret is set to empty string", + args: []string{ + "--agent-tls-secret=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--agent-tls-secret" flag: must be set`, + }, + { + name: "agent-tls-secret is invalid", + args: []string{ + "--agent-tls-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--agent-tls-secret" flag: invalid format`, + }, + { + name: "service is set to empty string", + args: []string{ + "--service=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--service" 
flag: must be set`, + }, + { + name: "service is invalid", + args: []string{ + "--service=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--service" flag: invalid format`, + }, + { + name: "cluster-domain is set to empty string", + args: []string{ + "--cluster-domain=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--cluster-domain" flag: must be set`, + }, + { + name: "cluster-domain is invalid", + args: []string{ + "--cluster-domain=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--cluster-domain" flag: invalid format`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() - cmd := createSleepCommand() + cmd := createGenerateCertsCommand() testFlag(t, cmd, test) }) } @@ -522,6 +580,48 @@ func TestInitializeCmdFlagValidation(t *testing.T) { } } +func TestSleepCmdFlagValidation(t *testing.T) { + t.Parallel() + tests := []flagTestCase{ + { + name: "valid flags", + args: []string{ + "--duration=1s", + }, + wantErr: false, + }, + { + name: "omitted flags", + args: nil, + wantErr: false, + }, + { + name: "duration is set to empty string", + args: []string{ + "--duration=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--duration" flag: time: invalid duration ""`, + }, + { + name: "duration is invalid", + args: []string{ + "--duration=invalid", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "invalid" for "--duration" flag: time: invalid duration "invalid"`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + cmd := createSleepCommand() + testFlag(t, cmd, test) + }) + } +} + func TestParseFlags(t *testing.T) { t.Parallel() g := NewWithT(t) @@ -591,30 +691,6 @@ func TestParseFlags(t *testing.T) { err = flagSet.Set("customStringFlagUserDefined", "changed-test-flag-value") g.Expect(err).To(Not(HaveOccurred())) - customStringFlagNoDefaultValueUnset := namespacedNameValue{ - 
value: types.NamespacedName{}, - } - flagSet.Var( - &customStringFlagNoDefaultValueUnset, - "customStringFlagNoDefaultValueUnset", - "no default value custom string test flag", - ) - - customStringFlagNoDefaultValueUserDefined := namespacedNameValue{ - value: types.NamespacedName{}, - } - flagSet.Var( - &customStringFlagNoDefaultValueUserDefined, - "customStringFlagNoDefaultValueUserDefined", - "no default value but with user defined namespacedName test flag", - ) - userDefinedNamespacedName := types.NamespacedName{ - Namespace: "changed-namespace", - Name: "changed-name", - } - err = flagSet.Set("customStringFlagNoDefaultValueUserDefined", userDefinedNamespacedName.String()) - g.Expect(err).To(Not(HaveOccurred())) - expectedKeys := []string{ "boolFlagTrue", "boolFlagFalse", @@ -624,9 +700,6 @@ func TestParseFlags(t *testing.T) { "customStringFlagDefault", "customStringFlagUserDefined", - - "customStringFlagNoDefaultValueUnset", - "customStringFlagNoDefaultValueUserDefined", } expectedValues := []string{ "true", @@ -637,9 +710,6 @@ func TestParseFlags(t *testing.T) { "default", "user-defined", - - "default", - "user-defined", } flagKeys, flagValues := parseFlags(flagSet) @@ -669,43 +739,62 @@ func TestCreateGatewayPodConfig(t *testing.T) { // Order matters here // We start with all env vars set - g.Expect(os.Setenv("POD_IP", "10.0.0.0")).To(Succeed()) g.Expect(os.Setenv("POD_UID", "1234")).To(Succeed()) g.Expect(os.Setenv("POD_NAMESPACE", "default")).To(Succeed()) g.Expect(os.Setenv("POD_NAME", "my-pod")).To(Succeed()) + g.Expect(os.Setenv("INSTANCE_NAME", "my-pod-xyz")).To(Succeed()) + g.Expect(os.Setenv("IMAGE_NAME", "my-pod-image:tag")).To(Succeed()) + + version := "0.0.0" expCfg := config.GatewayPodConfig{ - PodIP: "10.0.0.0", - ServiceName: "svc", - Namespace: "default", - Name: "my-pod", - UID: "1234", + ServiceName: "svc", + Namespace: "default", + Name: "my-pod", + UID: "1234", + InstanceName: "my-pod-xyz", + Version: "tag", + Image: "my-pod-image:tag", } - 
cfg, err := createGatewayPodConfig("svc") + cfg, err := createGatewayPodConfig(version, "svc") g.Expect(err).To(Not(HaveOccurred())) g.Expect(cfg).To(Equal(expCfg)) + // unset image tag and use provided version + g.Expect(os.Setenv("IMAGE_NAME", "my-pod-image")).To(Succeed()) + expCfg.Version = version + expCfg.Image = "my-pod-image" + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(Not(HaveOccurred())) + g.Expect(cfg).To(Equal(expCfg)) + + // unset image name + g.Expect(os.Unsetenv("IMAGE_NAME")).To(Succeed()) + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(MatchError(errors.New("environment variable IMAGE_NAME not set"))) + g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) + + // unset instance name + g.Expect(os.Unsetenv("INSTANCE_NAME")).To(Succeed()) + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(MatchError(errors.New("environment variable INSTANCE_NAME not set"))) + g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) + // unset name g.Expect(os.Unsetenv("POD_NAME")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_NAME not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) // unset namespace g.Expect(os.Unsetenv("POD_NAMESPACE")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_NAMESPACE not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) // unset pod UID g.Expect(os.Unsetenv("POD_UID")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_UID not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) - - // unset IP - g.Expect(os.Unsetenv("POD_IP")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") 
- g.Expect(err).To(MatchError(errors.New("environment variable POD_IP not set"))) - g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) } diff --git a/cmd/gateway/initialize.go b/cmd/gateway/initialize.go index 59af1e4f0e..516d2e95f7 100644 --- a/cmd/gateway/initialize.go +++ b/cmd/gateway/initialize.go @@ -3,23 +3,24 @@ package main import ( "context" "fmt" + "os" "path/filepath" "time" "github.com/go-logr/logr" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" ) const ( collectDeployCtxTimeout = 10 * time.Second ) -type copyFiles struct { - destDirName string - srcFileNames []string +type fileToCopy struct { + destDirName string + srcFileName string } type initializeConfig struct { @@ -27,13 +28,13 @@ type initializeConfig struct { fileManager file.OSFileManager fileGenerator config.Generator logger logr.Logger - copy copyFiles + copy []fileToCopy plus bool } func initialize(cfg initializeConfig) error { - for _, src := range cfg.copy.srcFileNames { - if err := copyFile(cfg.fileManager, src, cfg.copy.destDirName); err != nil { + for _, f := range cfg.copy { + if err := copyFile(cfg.fileManager, f.srcFileName, f.destDirName); err != nil { return err } } @@ -58,7 +59,7 @@ func initialize(cfg initializeConfig) error { return fmt.Errorf("failed to generate deployment context file: %w", err) } - if err := file.WriteFile(cfg.fileManager, depCtxFile); err != nil { + if err := file.Write(cfg.fileManager, file.Convert(depCtxFile)); err != nil { return fmt.Errorf("failed to write deployment context file: %w", err) } @@ -84,5 +85,9 @@ func copyFile(osFileManager file.OSFileManager, src, dest string) error { return fmt.Errorf("error copying file contents: %w", err) } + if err := osFileManager.Chmod(destFile, 
os.FileMode(file.RegularFileModeInt)); err != nil { + return fmt.Errorf("error setting file permissions: %w", err) + } + return nil } diff --git a/cmd/gateway/initialize_test.go b/cmd/gateway/initialize_test.go index 6f0f00ad8f..04999b6cad 100644 --- a/cmd/gateway/initialize_test.go +++ b/cmd/gateway/initialize_test.go @@ -11,11 +11,11 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file/filefakes" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing/licensingfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) @@ -28,9 +28,15 @@ func TestInitialize_OSS(t *testing.T) { ic := initializeConfig{ fileManager: fakeFileMgr, logger: logr.Discard(), - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: []string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: false, } @@ -56,9 +62,15 @@ func TestInitialize_OSS_Error(t *testing.T) { ic := initializeConfig{ fileManager: fakeFileMgr, logger: logr.Discard(), - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: []string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: false, } @@ -114,9 +126,15 @@ func TestInitialize_Plus(t *testing.T) { logger: logr.Discard(), collector: fakeCollector, fileGenerator: fakeGenerator, - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: 
[]string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: true, } @@ -133,7 +151,7 @@ func TestInitialize_Plus(t *testing.T) { g.Expect(fakeGenerator.GenerateDeploymentContextArgsForCall(0)).To(Equal(test.depCtx)) g.Expect(fakeCollector.CollectCallCount()).To(Equal(1)) g.Expect(fakeFileMgr.WriteCallCount()).To(Equal(1)) - g.Expect(fakeFileMgr.ChmodCallCount()).To(Equal(1)) + g.Expect(fakeFileMgr.ChmodCallCount()).To(Equal(3)) }) } } @@ -161,6 +179,7 @@ func TestCopyFileErrors(t *testing.T) { openErr := errors.New("open error") createErr := errors.New("create error") copyErr := errors.New("copy error") + chmodErr := errors.New("chmod error") tests := []struct { fileMgr *filefakes.FakeOSFileManager @@ -194,6 +213,15 @@ func TestCopyFileErrors(t *testing.T) { }, expErr: copyErr, }, + { + name: "can't set permissions", + fileMgr: &filefakes.FakeOSFileManager{ + ChmodStub: func(_ *os.File, _ os.FileMode) error { + return chmodErr + }, + }, + expErr: chmodErr, + }, } for _, test := range tests { diff --git a/cmd/gateway/main.go b/cmd/gateway/main.go index fc2a5949c7..515fcc3f16 100644 --- a/cmd/gateway/main.go +++ b/cmd/gateway/main.go @@ -21,8 +21,8 @@ func main() { rootCmd := createRootCommand() rootCmd.AddCommand( - createStaticModeCommand(), - createProvisionerModeCommand(), + createControllerCommand(), + createGenerateCertsCommand(), createInitializeCommand(), createSleepCommand(), ) diff --git a/cmd/gateway/validating_types.go b/cmd/gateway/validating_types.go index 42d24782cb..c0fd93da81 100644 --- a/cmd/gateway/validating_types.go +++ b/cmd/gateway/validating_types.go @@ -1,10 +1,11 @@ package main import ( + "bytes" + "encoding/csv" "fmt" "strconv" - - "k8s.io/apimachinery/pkg/types" + "strings" ) // stringValidatingValue is a string flag value with custom validation logic. 
@@ -30,57 +31,76 @@ func (v *stringValidatingValue) Type() string { return "string" } -type intValidatingValue struct { - validator func(v int) error - value int +// stringSliceValidatingValue is a string slice flag value with custom validation logic. +// it implements the pflag.Value interface. +type stringSliceValidatingValue struct { + validator func(v string) error + values []string + changed bool } -func (v *intValidatingValue) String() string { - return strconv.Itoa(v.value) +func (v *stringSliceValidatingValue) String() string { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(v.values) + if err != nil { + return "" + } + + w.Flush() + str := strings.TrimSuffix(b.String(), "\n") + return "[" + str + "]" } -func (v *intValidatingValue) Set(param string) error { - intVal, err := strconv.ParseInt(param, 10, 32) - if err != nil { - return fmt.Errorf("failed to parse int value: %w", err) +func (v *stringSliceValidatingValue) Set(param string) error { + if err := v.validator(param); err != nil { + return err } - if err := v.validator(int(intVal)); err != nil { + stringReader := strings.NewReader(param) + csvReader := csv.NewReader(stringReader) + str, err := csvReader.Read() + if err != nil { return err } - v.value = int(intVal) + if !v.changed { + v.values = str + } else { + v.values = append(v.values, str...) + } + v.changed = true + return nil } -func (v *intValidatingValue) Type() string { - return "int" +func (v *stringSliceValidatingValue) Type() string { + return "stringSlice" } -// namespacedNameValue is a string flag value that represents a namespaced name. -// it implements the pflag.Value interface. 
-type namespacedNameValue struct { - value types.NamespacedName +type intValidatingValue struct { + validator func(v int) error + value int } -func (v *namespacedNameValue) String() string { - if (v.value == types.NamespacedName{}) { - // if we don't do that, the default value in the help message will be printed as "/" - return "" - } - return v.value.String() +func (v *intValidatingValue) String() string { + return strconv.Itoa(v.value) } -func (v *namespacedNameValue) Set(param string) error { - nsname, err := parseNamespacedResourceName(param) +func (v *intValidatingValue) Set(param string) error { + intVal, err := strconv.ParseInt(param, 10, 32) if err != nil { + return fmt.Errorf("failed to parse int value: %w", err) + } + + if err := v.validator(int(intVal)); err != nil { return err } - v.value = nsname + v.value = int(intVal) return nil } -func (v *namespacedNameValue) Type() string { - return "string" +func (v *intValidatingValue) Type() string { + return "int" } diff --git a/cmd/gateway/validation.go b/cmd/gateway/validation.go index aced2ef06b..a953c522c1 100644 --- a/cmd/gateway/validation.go +++ b/cmd/gateway/validation.go @@ -8,7 +8,6 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" ) @@ -55,40 +54,6 @@ func validateResourceName(value string) error { return nil } -func validateNamespaceName(value string) error { - // used by Kubernetes to validate resource namespace names - messages := validation.IsDNS1123Label(value) - if len(messages) > 0 { - msg := strings.Join(messages, "; ") - return fmt.Errorf("invalid format: %s", msg) - } - - return nil -} - -func parseNamespacedResourceName(value string) (types.NamespacedName, error) { - if value == "" { - return types.NamespacedName{}, errors.New("must be set") - } - - parts := strings.Split(value, "/") - if len(parts) != 2 { - return types.NamespacedName{}, errors.New("invalid format; must be NAMESPACE/NAME") - } - - if err := 
validateNamespaceName(parts[0]); err != nil { - return types.NamespacedName{}, fmt.Errorf("invalid namespace name: %w", err) - } - if err := validateResourceName(parts[1]); err != nil { - return types.NamespacedName{}, fmt.Errorf("invalid resource name: %w", err) - } - - return types.NamespacedName{ - Namespace: parts[0], - Name: parts[1], - }, nil -} - func validateQualifiedName(name string) error { if len(name) == 0 { return errors.New("must be set") @@ -206,12 +171,15 @@ func ensureNoPortCollisions(ports ...int) error { return nil } -// validateCopyArgs ensures that arguments to the sleep command are set. -func validateCopyArgs(srcFiles []string, dest string) error { +// validateCopyArgs ensures that arguments to the initialize command are set. +func validateCopyArgs(srcFiles []string, destDirs []string) error { + if len(srcFiles) != len(destDirs) { + return errors.New("source and destination must have the same number of elements") + } if len(srcFiles) == 0 { return errors.New("source must not be empty") } - if len(dest) == 0 { + if len(destDirs) == 0 { return errors.New("destination must not be empty") } diff --git a/cmd/gateway/validation_test.go b/cmd/gateway/validation_test.go index 1774f13619..665bd91582 100644 --- a/cmd/gateway/validation_test.go +++ b/cmd/gateway/validation_test.go @@ -4,7 +4,6 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/types" ) func TestValidateGatewayControllerName(t *testing.T) { @@ -132,137 +131,6 @@ func TestValidateResourceName(t *testing.T) { } } -func TestValidateNamespaceName(t *testing.T) { - t.Parallel() - tests := []struct { - name string - value string - expErr bool - }{ - { - name: "valid", - value: "mynamespace", - expErr: false, - }, - { - name: "valid - with dash", - value: "my-namespace", - expErr: false, - }, - { - name: "valid - with numbers", - value: "mynamespace123", - expErr: false, - }, - { - name: "invalid - empty", - value: "", - expErr: true, - }, - { - name: "invalid - invalid character '.'", - value: "my.namespace", - expErr: true, - }, - { - name: "invalid - invalid character '/'", - value: "my/namespace", - expErr: true, - }, - { - name: "invalid - invalid character '_'", - value: "my_namespace", - expErr: true, - }, - { - name: "invalid - invalid character '@'", - value: "my@namespace", - expErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := validateNamespaceName(test.value) - - if test.expErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} - -func TestParseNamespacedResourceName(t *testing.T) { - t.Parallel() - tests := []struct { - name string - value string - expectedErrPrefix string - expectedNsName types.NamespacedName - expectErr bool - }{ - { - name: "valid", - value: "test/my-gateway", - expectedNsName: types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, - expectErr: false, - }, - { - name: "empty", - value: "", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "must be set", - }, - { - name: "wrong number of parts", - value: "test", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid format; must be NAMESPACE/NAME", - }, - { - name: "invalid namespace", - value: 
"t@st/my-gateway", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid namespace name", - }, - { - name: "invalid name", - value: "test/my-g@teway", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid resource name", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - nsName, err := parseNamespacedResourceName(test.value) - - if test.expectErr { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(HavePrefix(test.expectedErrPrefix)) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(nsName).To(Equal(test.expectedNsName)) - } - }) - } -} - func TestValidateQualifiedName(t *testing.T) { t.Parallel() tests := []struct { @@ -554,33 +422,39 @@ func TestEnsureNoPortCollisions(t *testing.T) { g.Expect(ensureNoPortCollisions(9113, 9113)).ToNot(Succeed()) } -func TestValidateSleepArgs(t *testing.T) { +func TestValidateInitializeArgs(t *testing.T) { t.Parallel() tests := []struct { name string - dest string + destDirs []string srcFiles []string expErr bool }{ { name: "valid values", - dest: "/dest/file", + destDirs: []string{"/dest/"}, srcFiles: []string{"/src/file"}, expErr: false, }, { name: "invalid dest", - dest: "", + destDirs: []string{}, srcFiles: []string{"/src/file"}, expErr: true, }, { name: "invalid src", - dest: "/dest/file", + destDirs: []string{"/dest/"}, srcFiles: []string{}, expErr: true, }, + { + name: "different lengths", + destDirs: []string{"/dest/"}, + srcFiles: []string{"src1", "src2"}, + expErr: true, + }, } for _, tc := range tests { @@ -588,7 +462,7 @@ func TestValidateSleepArgs(t *testing.T) { t.Parallel() g := NewWithT(t) - err := validateCopyArgs(tc.srcFiles, tc.dest) + err := validateCopyArgs(tc.srcFiles, tc.destDirs) if !tc.expErr { g.Expect(err).ToNot(HaveOccurred()) } else { diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml 
b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index 83f89a9ff9..2e961a2c10 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -14,18 +14,22 @@ spec: listKind: NginxProxyList plural: nginxproxies singular: nginxproxy - scope: Cluster + scope: Namespaced versions: - additionalPrinterColumns: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1alpha2 schema: openAPIV3Schema: description: |- - NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. It provides a way - to configure global settings for all Gateways defined from the GatewayClass. + NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef + or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. + If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. + If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass + reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those + set on the GatewayClass NginxProxy. properties: apiVersion: description: |- @@ -50,7 +54,7 @@ spec: disableHTTP2: description: |- DisableHTTP2 defines if http2 should be disabled for all servers. - Default is false, meaning http2 will be enabled for all servers. + If not specified, or set to false, http2 will be enabled for all servers. type: boolean ipFamily: default: dual @@ -62,9 +66,3489 @@ spec: - ipv4 - ipv6 type: string + kubernetes: + description: Kubernetes contains the configuration for the NGINX Deployment + and Service Kubernetes objects. + properties: + deployment: + description: |- + Deployment is the configuration for the NGINX Deployment. + This is the default deployment option. 
+ properties: + container: + description: Container defines container fields for the NGINX + container. + properties: + debug: + description: Debug enables debugging for NGINX by using + the nginx-debug binary. + type: boolean + image: + description: Image is the NGINX image to use. + properties: + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image. + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + description: |- + Repository is the image path. + Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + type: string + tag: + description: Tag is the image tag to use. Default + matches the tag of the control plane. + type: string + type: object + lifecycle: + description: |- + Lifecycle describes actions that the management system should take in response to container lifecycle + events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + until the action is complete, unless the container process fails, in which case the handler is aborted. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + resources: + description: Resources describes the compute resource + requirements. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + volumeMounts: + description: VolumeMounts describe the mounting of Volumes + within a container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + pod: + description: Pod defines Pod-specific fields. + properties: + affinity: + description: Affinity is the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + type: object + terminationGracePeriodSeconds: + description: |- + TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: Tolerations allow the scheduler to schedule + Pods with matching taints. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. 
+ items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. 
+ When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + description: Volumes represents named volumes in a pod + that may be accessed by any container in the pod. + items: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk + in the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the + mounted root, rather than the full Ceph tree, + default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API + about the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API + volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. 
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and + then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun + number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver + to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field + holds extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
+ properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the + dataset. This is unique identifier of a Flocker + dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for + the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. 
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether + support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun + number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a + Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". 
If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
+ type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and + uid are supported.' + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path + is the relative path name + of the file to be created. 
+ Must not be absolute or contain + the ''..'' path. Must be utf-8 + encoded. The first item of + the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about + the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to + project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of + the ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile ID + associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + replicas: + description: Number of desired Pods. + format: int32 + type: integer + type: object + service: + description: Service is the configuration for the NGINX Service. + properties: + externalTrafficPolicy: + default: Local + description: |- + ExternalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + and LoadBalancer IPs. + enum: + - Cluster + - Local + type: string + loadBalancerClass: + description: |- + LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + Requires service type to be LoadBalancer. + type: string + loadBalancerIP: + description: LoadBalancerIP is a static IP address for the + load balancer. Requires service type to be LoadBalancer. + type: string + loadBalancerSourceRanges: + description: |- + LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + Requires service type to be LoadBalancer. + items: + type: string + type: array + nodePorts: + description: |- + NodePorts are the list of NodePorts to expose on the NGINX data plane service. + Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + The default NodePort range enforced by Kubernetes is 30000-32767. + items: + description: |- + NodePort creates a port on each node on which the NGINX data plane service is exposed. 
The NodePort MUST + map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort + automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. + properties: + listenerPort: + description: |- + ListenerPort is the Gateway listener port that this NodePort maps to. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + port: + description: |- + Port is the NodePort to expose. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + required: + - listenerPort + - port + type: object + type: array + type: + default: LoadBalancer + description: ServiceType describes ingress method for the + Service. + enum: + - ClusterIP + - LoadBalancer + - NodePort + type: string + type: object + type: object logging: description: Logging defines logging related settings for NGINX. properties: + agentLevel: + default: info + description: |- + AgentLevel defines the log level of the NGINX agent process. Changing this value results in a + re-roll of the NGINX deployment. + enum: + - debug + - info + - error + - panic + - fatal + type: string errorLevel: default: info description: |- @@ -83,6 +3567,22 @@ spec: - emerg type: string type: object + metrics: + description: |- + Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + re-roll of the NGINX deployment. + properties: + disable: + description: Disable serving Prometheus metrics on the listen + port. + type: boolean + port: + description: Port where the Prometheus metrics are exposed. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object nginxPlus: description: NginxPlus specifies NGINX Plus additional settings. 
properties: @@ -141,7 +3641,6 @@ spec: If a request comes from a trusted address, NGINX will rewrite the client IP information, and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. To trust all addresses (not recommended for production), set to 0.0.0.0/0. If no addresses are provided, NGINX will not rewrite the client IP information. Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from @@ -166,9 +3665,6 @@ spec: type: object maxItems: 16 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map type: object x-kubernetes-validations: - message: if mode is set, trustedAddresses is a required field @@ -177,6 +3673,16 @@ spec: telemetry: description: Telemetry specifies the OpenTelemetry configuration. properties: + disabledFeatures: + description: DisabledFeatures specifies OpenTelemetry features + to be disabled. + items: + description: DisableTelemetryFeature is a telemetry feature + that can be disabled. + enum: + - DisableTracing + type: string + type: array exporter: description: Exporter specifies OpenTelemetry export parameters. 
properties: @@ -206,8 +3712,6 @@ spec: Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter pattern: ^[0-9]{1,4}(ms|s|m|h)?$ type: string - required: - - endpoint type: object serviceName: description: |- diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml deleted file mode 100644 index 698f9b82ca..0000000000 --- a/config/tests/static-deployment.yaml +++ /dev/null @@ -1,178 +0,0 @@ ---- -# Source: nginx-gateway-fabric/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-gateway - namespace: nginx-gateway - labels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/version: "edge" -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - template: - metadata: - labels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - spec: - initContainers: - - name: init - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - KILL # Set because the binary has CAP_KILL for the main controller process. Not used by init. 
- drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-includes-bootstrap - mountPath: /includes - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - containers: - - args: - - static-mode - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx - - --config=nginx-gateway-config - - --service=nginx-gateway - - --metrics-disable - - --health-port=8081 - - --leader-election-lock-name=nginx-gateway-leader-election - - --product-telemetry-disable - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway - ports: - - name: health - containerPort: 8081 - readinessProbe: - httpGet: - path: /readyz - port: health - initialDelaySeconds: 3 - periodSeconds: 1 - securityContext: - seccompProfile: - type: RuntimeDefault - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: 
true - runAsUser: 101 - runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-cache - mountPath: /var/cache/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - terminationGracePeriodSeconds: 30 - serviceAccountName: nginx-gateway - shareProcessNamespace: true - securityContext: - fsGroup: 1001 - runAsNonRoot: true - volumes: - - name: nginx-conf - emptyDir: {} - - name: nginx-stream-conf - emptyDir: {} - - name: nginx-main-includes - emptyDir: {} - - name: nginx-secrets - emptyDir: {} - - name: nginx-run - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-includes - emptyDir: {} - - name: nginx-includes-bootstrap - configMap: - name: nginx-includes-bootstrap diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml deleted file mode 100644 index bd222a1ece..0000000000 --- a/deploy/aws-nlb/deploy.yaml +++ /dev/null @@ -1,391 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-gateway ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway -rules: -- apiGroups: - - "" - resources: - - namespaces - - services - - secrets - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - verbs: - - 
list -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses - - gateways - - httproutes - - referencegrants - - grpcroutes - verbs: - - list - - watch -- apiGroups: - - gateway.networking.k8s.io - resources: - - httproutes/status - - gateways/status - - gatewayclasses/status - - grpcroutes/status - verbs: - - update -- apiGroups: - - gateway.nginx.org - resources: - - nginxgateways - verbs: - - get - - list - - watch -- apiGroups: - - gateway.nginx.org - resources: - - nginxproxies - - clientsettingspolicies - - observabilitypolicies - - upstreamsettingspolicies - verbs: - - list - - watch -- apiGroups: - - gateway.nginx.org - resources: - - nginxgateways/status - - clientsettingspolicies/status - - observabilitypolicies/status - - upstreamsettingspolicies/status - verbs: - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway -subjects: -- kind: ServiceAccount - name: nginx-gateway - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - 
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-type: external - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - spec: - containers: - - args: - - static-mode - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx - - --config=nginx-gateway-config - - --service=nginx-gateway - - --metrics-port=9113 - - --health-port=8081 - - --leader-election-lock-name=nginx-gateway-leader-election - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway - ports: - - containerPort: 9113 - name: metrics - - containerPort: 8081 - name: health - readinessProbe: - httpGet: - path: /readyz - port: health - 
initialDelaySeconds: 3 - periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes 
- name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: GatewayClass -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx -spec: - controllerName: gateway.nginx.org/nginx-gateway-controller ---- -apiVersion: gateway.nginx.org/v1alpha1 -kind: NginxGateway -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-config - namespace: nginx-gateway -spec: - logging: - level: info diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 990adedf38..6fd37f8a86 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: 
rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,26 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -64,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -128,33 +173,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -167,20 +217,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - 
name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -208,19 +253,16 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -233,10 +275,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -250,40 +300,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - 
NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -292,78 +308,65 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: 
/etc/nginx/main-includes - name: nginx-main-includes - nodeSelector: - kubernetes.io/os: linux + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -375,6 +378,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -388,3 +396,28 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + pod: + nodeSelector: + kubernetes.io/os: linux + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 7891f908c7..3f37a7e7bb 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -599,18 +599,22 @@ spec: listKind: NginxProxyList plural: nginxproxies singular: nginxproxy - scope: Cluster + scope: Namespaced versions: - additionalPrinterColumns: - jsonPath: 
.metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1alpha2 schema: openAPIV3Schema: description: |- - NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. It provides a way - to configure global settings for all Gateways defined from the GatewayClass. + NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef + or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. + If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. + If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass + reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those + set on the GatewayClass NginxProxy. properties: apiVersion: description: |- @@ -635,7 +639,7 @@ spec: disableHTTP2: description: |- DisableHTTP2 defines if http2 should be disabled for all servers. - Default is false, meaning http2 will be enabled for all servers. + If not specified, or set to false, http2 will be enabled for all servers. type: boolean ipFamily: default: dual @@ -647,9 +651,3489 @@ spec: - ipv4 - ipv6 type: string + kubernetes: + description: Kubernetes contains the configuration for the NGINX Deployment + and Service Kubernetes objects. + properties: + deployment: + description: |- + Deployment is the configuration for the NGINX Deployment. + This is the default deployment option. + properties: + container: + description: Container defines container fields for the NGINX + container. + properties: + debug: + description: Debug enables debugging for NGINX by using + the nginx-debug binary. + type: boolean + image: + description: Image is the NGINX image to use. + properties: + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image. 
+ enum: + - Always + - Never + - IfNotPresent + type: string + repository: + description: |- + Repository is the image path. + Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + type: string + tag: + description: Tag is the image tag to use. Default + matches the tag of the control plane. + type: string + type: object + lifecycle: + description: |- + Lifecycle describes actions that the management system should take in response to container lifecycle + events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + until the action is complete, unless the container process fails, in which case the handler is aborted. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + resources: + description: Resources describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + volumeMounts: + description: VolumeMounts describe the mounting of Volumes + within a container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + pod: + description: Pod defines Pod-specific fields. + properties: + affinity: + description: Affinity is the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + type: object + terminationGracePeriodSeconds: + description: |- + TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: Tolerations allow the scheduler to schedule + Pods with matching taints. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. 
+ items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. 
+ When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + description: Volumes represents named volumes in a pod + that may be accessed by any container in the pod. + items: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk + in the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the + mounted root, rather than the full Ceph tree, + default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API + about the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API + volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. 
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and + then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun + number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver + to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field + holds extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
+ properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the + dataset. This is unique identifier of a Flocker + dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for + the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. 
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether + support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun + number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a + Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". 
If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
+ type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and + uid are supported.' + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path + is the relative path name + of the file to be created. 
+ Must not be absolute or contain + the ''..'' path. Must be utf-8 + encoded. The first item of + the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about + the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to + project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of + the ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile ID + associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + replicas: + description: Number of desired Pods. + format: int32 + type: integer + type: object + service: + description: Service is the configuration for the NGINX Service. + properties: + externalTrafficPolicy: + default: Local + description: |- + ExternalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + and LoadBalancer IPs. + enum: + - Cluster + - Local + type: string + loadBalancerClass: + description: |- + LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + Requires service type to be LoadBalancer. + type: string + loadBalancerIP: + description: LoadBalancerIP is a static IP address for the + load balancer. Requires service type to be LoadBalancer. + type: string + loadBalancerSourceRanges: + description: |- + LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + Requires service type to be LoadBalancer. + items: + type: string + type: array + nodePorts: + description: |- + NodePorts are the list of NodePorts to expose on the NGINX data plane service. + Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + The default NodePort range enforced by Kubernetes is 30000-32767. + items: + description: |- + NodePort creates a port on each node on which the NGINX data plane service is exposed. 
The NodePort MUST + map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort + automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. + properties: + listenerPort: + description: |- + ListenerPort is the Gateway listener port that this NodePort maps to. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + port: + description: |- + Port is the NodePort to expose. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + required: + - listenerPort + - port + type: object + type: array + type: + default: LoadBalancer + description: ServiceType describes ingress method for the + Service. + enum: + - ClusterIP + - LoadBalancer + - NodePort + type: string + type: object + type: object logging: description: Logging defines logging related settings for NGINX. properties: + agentLevel: + default: info + description: |- + AgentLevel defines the log level of the NGINX agent process. Changing this value results in a + re-roll of the NGINX deployment. + enum: + - debug + - info + - error + - panic + - fatal + type: string errorLevel: default: info description: |- @@ -668,6 +4152,22 @@ spec: - emerg type: string type: object + metrics: + description: |- + Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + re-roll of the NGINX deployment. + properties: + disable: + description: Disable serving Prometheus metrics on the listen + port. + type: boolean + port: + description: Port where the Prometheus metrics are exposed. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object nginxPlus: description: NginxPlus specifies NGINX Plus additional settings. 
properties: @@ -726,7 +4226,6 @@ spec: If a request comes from a trusted address, NGINX will rewrite the client IP information, and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. To trust all addresses (not recommended for production), set to 0.0.0.0/0. If no addresses are provided, NGINX will not rewrite the client IP information. Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from @@ -751,9 +4250,6 @@ spec: type: object maxItems: 16 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map type: object x-kubernetes-validations: - message: if mode is set, trustedAddresses is a required field @@ -762,6 +4258,16 @@ spec: telemetry: description: Telemetry specifies the OpenTelemetry configuration. properties: + disabledFeatures: + description: DisabledFeatures specifies OpenTelemetry features + to be disabled. + items: + description: DisableTelemetryFeature is a telemetry feature + that can be disabled. + enum: + - DisableTracing + type: string + type: array exporter: description: Exporter specifies OpenTelemetry export parameters. 
properties: @@ -791,8 +4297,6 @@ spec: Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter pattern: ^[0-9]{1,4}(ms|s|m|h)?$ type: string - required: - - endpoint type: object serviceName: description: |- diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index 9a0746a1d9..28f9bbec55 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,26 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -64,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -128,33 +173,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway 
app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -167,20 +217,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -208,19 +253,16 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -233,10 +275,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -250,40 +300,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -292,76 +308,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + 
terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -373,6 +376,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: 
gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -386,3 +394,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 69f8a68c58..68bdf72e43 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -15,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -26,32 +53,35 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets - configmaps + - serviceaccounts + - 
services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - list - apiGroups: - "" @@ -73,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -141,38 +177,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -185,20 +221,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 
protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -226,11 +257,13 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 @@ -238,10 +271,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --gateway-api-experimental-features env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -254,10 +283,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -271,40 +308,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always 
- name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -313,89 +316,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator 
securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -407,6 +384,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -420,3 +402,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer 
diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index 0248ce832e..be7273edd4 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,27 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -65,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -133,33 +177,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + 
name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -172,20 +221,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -213,20 +257,17 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election - --gateway-api-experimental-features env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -239,10 +280,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + 
- containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -256,40 +305,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -298,76 +313,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: 
nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -379,6 +381,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -392,3 +399,25 @@ metadata: spec: logging: level: info +--- +apiVersion: 
gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 4d7180632d..7bdb4fe3c9 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -15,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -26,31 +53,35 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets 
- verbs: - list - apiGroups: - "" @@ -72,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -136,38 +173,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -180,20 +217,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -221,21 +253,19 @@ spec: spec: containers: - args: - - static-mode + - 
controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -248,10 +278,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -265,40 +303,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -307,89 +311,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - 
mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: 
nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -401,6 +379,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -414,3 +397,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index 414317999b..909270a96b 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + 
app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,26 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -64,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -128,33 +173,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: 
app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -167,20 +217,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: NodePort + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -208,19 +253,16 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -233,10 +275,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -250,40 +300,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: 
/etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -292,76 +308,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: 
POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -373,6 +376,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -386,3 +394,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: 
ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: NodePort diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index 61a8f82587..d5bc82d23f 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,26 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -64,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -130,39 +175,57 @@ rules: - security.openshift.io resourceNames: - nginx-gateway-scc + - nginx-gateway-scc-nginx resources: - securitycontextconstraints verbs: - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - update + - delete + - list + - get + - watch --- 
apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -175,20 +238,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -216,19 +274,17 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election + - --nginx-scc=nginx-gateway-scc-nginx env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: 
status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -241,10 +297,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -258,40 +322,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -300,76 +330,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - 
/etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: 
gateway.networking.k8s.io/v1 kind: GatewayClass @@ -381,6 +398,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -395,6 +417,28 @@ spec: logging: level: info --- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer +--- allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false @@ -402,9 +446,6 @@ allowHostPID: false allowHostPorts: false allowPrivilegeEscalation: false allowPrivilegedContainer: false -allowedCapabilities: -- NET_BIND_SERVICE -- KILL apiVersion: security.openshift.io/v1 fsGroup: ranges: @@ -413,13 +454,17 @@ fsGroup: type: MustRunAs kind: SecurityContextConstraints metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge name: nginx-gateway-scc readOnlyRootFilesystem: true requiredDropCapabilities: - ALL runAsUser: type: MustRunAsRange - uidRangeMax: 102 + uidRangeMax: 101 uidRangeMin: 101 seLinuxContext: type: MustRunAs @@ -433,6 +478,87 @@ supplementalGroups: users: - system:serviceaccount:nginx-gateway:nginx-gateway volumes: +- secret +--- +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: false +allowPrivilegedContainer: false +apiVersion: security.openshift.io/v1 +fsGroup: + ranges: + - max: 1001 + 
min: 1001 + type: MustRunAs +kind: SecurityContextConstraints +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-scc-cert-generator +readOnlyRootFilesystem: true +requiredDropCapabilities: +- ALL +runAsUser: + type: MustRunAsRange + uidRangeMax: 101 + uidRangeMin: 101 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +supplementalGroups: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +users: +- system:serviceaccount:nginx-gateway:nginx-gateway-cert-generator +volumes: +- projected +--- +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +- NET_BIND_SERVICE +apiVersion: security.openshift.io/v1 +fsGroup: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +kind: SecurityContextConstraints +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-scc-nginx +readOnlyRootFilesystem: true +requiredDropCapabilities: +- ALL +runAsUser: + type: MustRunAsRange + uidRangeMax: 101 + uidRangeMin: 101 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +supplementalGroups: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +volumes: - emptyDir - secret - configMap diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index 4e896d2f22..e6b27f01fb 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -15,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + 
labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -26,31 +53,35 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - list - apiGroups: - "" @@ -72,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -138,38 +175,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report 
off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -182,20 +219,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -223,11 +255,13 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 @@ -235,10 +269,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --snippets-filters env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -251,10 +281,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - 
containerPort: 9113 name: metrics - containerPort: 8081 @@ -268,40 +306,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -310,89 +314,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: 
nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -404,6 +382,11 @@ metadata: name: nginx spec: controllerName: 
gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -417,3 +400,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 8f220c8fe3..714376f7f7 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -24,26 +53,36 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments verbs: - - get + - create + - update + - delete - list + - get - watch - 
apiGroups: - "" resources: + - namespaces - pods verbs: - get + - list + - watch - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -64,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -130,33 +175,38 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-gateway + name: nginx-gateway-cert-generator + namespace: nginx-gateway roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway + kind: Role + name: nginx-gateway-cert-generator subjects: - kind: ServiceAccount - name: nginx-gateway + name: nginx-gateway-cert-generator namespace: nginx-gateway --- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway app.kubernetes.io/version: edge - name: nginx-includes-bootstrap + name: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-gateway +subjects: +- kind: ServiceAccount + name: nginx-gateway namespace: nginx-gateway --- apiVersion: v1 @@ -169,20 +219,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -210,20 +255,17 @@ spec: spec: containers: - args: - - 
static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election - --snippets-filters env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -236,10 +278,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -253,40 +303,6 @@ spec: securityContext: allowPrivilegeEscalation: false capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE drop: - ALL readOnlyRootFilesystem: true @@ -295,76 +311,63 @@ spec: seccompProfile: type: RuntimeDefault volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: 
/etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls env: - - name: POD_UID + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: metadata.uid + fieldPath: metadata.namespace image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always - name: init + name: cert-generator securityContext: + allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes + restartPolicy: Never securityContext: fsGroup: 1001 runAsNonRoot: true - serviceAccountName: nginx-gateway - shareProcessNamespace: true - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: 
nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -376,6 +379,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -389,3 +397,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/docs/developer/quickstart.md b/docs/developer/quickstart.md index 058d3d5b9d..697acf5036 100644 --- a/docs/developer/quickstart.md +++ b/docs/developer/quickstart.md @@ -183,13 +183,13 @@ This will build the docker images `nginx-gateway-fabric:` and `nginx- - To install with Helm (where your release name is `my-release`): ```shell - helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never -n 
nginx-gateway + helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set nginx.service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never -n nginx-gateway ``` - To install NGINX Plus with Helm (where your release name is `my-release`): ```shell - helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx-plus --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never --set nginx.plus=true -n nginx-gateway + helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set nginx.service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx-plus --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never --set nginx.plus=true -n nginx-gateway ``` > For more information on Helm configuration options see the Helm [README](../../charts/nginx-gateway-fabric/README.md). diff --git a/docs/developer/release-process.md b/docs/developer/release-process.md index 83278a5e1d..5179d68cc8 100644 --- a/docs/developer/release-process.md +++ b/docs/developer/release-process.md @@ -44,9 +44,8 @@ To create a new release, follow these steps: 1. Kick off the [longevity tests](https://github.com/nginx/nginx-gateway-fabric/blob/main/tests/README.md#longevity-testing) for both OSS and Plus. You'll need to create two clusters and VMs for this. 
Before running, update your `vars.env` file with the proper image tag and prefixes. NGF and nginx images will be available from `ghcr.io`, and nginx plus will be available in GCP (`us-docker.pkg.dev//nginx-gateway-fabric/nginx-plus`). These tests need to run for 4 days before releasing. The results should be committed to the main branch and then cherry-picked to the release branch. 2. Kick off the [NFR workflow](https://github.com/nginx/nginx-gateway-fabric/actions/workflows/nfr.yml) in the browser. For `image_tag`, use `release-X.X-rc`, and for `version`, use the upcoming `X.Y.Z` NGF version. Run the workflow on the new release branch. This will run all of the NFR tests which are automated and open a PR with the results files when it is complete. Review this PR and make any necessary changes before merging. Once merged, be sure to cherry-pick the commit to the main branch as well (the original PR targets the release branch). 5. Run the [Release PR](https://github.com/nginx/nginx-gateway-fabric/actions/workflows/release-pr.yml) workflow to update the repo files for the release. Then there are a few manual steps to complete: - 1. Update the version tag used in the [provisioner manifest](/tests/conformance/provisioner/provisioner.yaml) and [getting started guide](/site/content/get-started.md). - 2. Update the [README](/README.md) to include information about the release. - 3. Update the [changelog](/CHANGELOG.md). There is going to be a new blank section generated by the automation that needs to be adjusted accordingly. + 1. Update the [README](/README.md) to include information about the release. + 2. Update the [changelog](/CHANGELOG.md). There is going to be a new blank section generated by the automation that needs to be adjusted accordingly. - At the top there will be a list of all PRs that are labeled with `release-notes`. The changelog includes only important (from the user perspective) changes to NGF. 
This is in contrast with the autogenerated full changelog, which is created in the next diff --git a/docs/developer/testing.md b/docs/developer/testing.md index 7804930ff3..bc19f145cb 100644 --- a/docs/developer/testing.md +++ b/docs/developer/testing.md @@ -69,19 +69,19 @@ Follow the steps below for manual testing: - Logs of the `nginx-gateway` container. Look out for unexpected error logs or panics. ```shell - kubectl logs -n nginx-gateway -l app=nginx-gateway + kubectl -n nginx-gateway logs ``` - Logs of the `nginx` container. Look for unexpected error logs and verify the access logs are correct. ```shell - kubectl logs -n nginx-gateway -l app=nginx + kubectl -n logs ``` - The generated nginx config. Make sure it's correct. ```shell - kubectl exec -it -n nginx-gateway -c nginx -- nginx -T + kubectl exec -it -n -- nginx -T ``` - The statuses of the Gateway API Resources. Make sure they look correct. diff --git a/docs/proposals/nginx-extensions.md b/docs/proposals/nginx-extensions.md index 497de1355f..6bf72fd300 100644 --- a/docs/proposals/nginx-extensions.md +++ b/docs/proposals/nginx-extensions.md @@ -155,7 +155,7 @@ spec: name: my-annotation ``` -Infrastructure labels and annotations should be applied to all resources created in response to the Gateway. This only applies to _automated deployments_ (i.e., provisioner mode), implementations that automatically deploy the data plane based on a Gateway. +Infrastructure labels and annotations should be applied to all resources created in response to the Gateway. Other use cases for this API are Service type, Service IP, CPU memory requests, affinity rules, and Gateway routability (public, private, and cluster). ### TLS Options diff --git a/embedded.go b/embedded.go deleted file mode 100644 index 0147f76f0e..0000000000 --- a/embedded.go +++ /dev/null @@ -1,11 +0,0 @@ -package embeddedfiles - -import _ "embed" - -// StaticModeDeploymentYAML contains the YAML manifest of the Deployment resource for the static mode. 
-// We put this in the root of the repo because goembed doesn't support relative/absolute paths and symlinks, -// and we want to keep the static mode deployment manifest for the provisioner in the config/tests/ -// directory. -// -//go:embed config/tests/static-deployment.yaml -var StaticModeDeploymentYAML []byte diff --git a/examples/cafe-example/README.md b/examples/cafe-example/README.md index e0254de815..d10f871e05 100644 --- a/examples/cafe-example/README.md +++ b/examples/cafe-example/README.md @@ -9,18 +9,6 @@ to route traffic to that application using HTTPRoute resources. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Cafe Application 1. Create the coffee and the tea Deployments and Services: @@ -49,6 +37,15 @@ to route traffic to that application using HTTPRoute resources. kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell diff --git a/examples/cross-namespace-routing/README.md b/examples/cross-namespace-routing/README.md index 3e774cff46..9e98908a63 100644 --- a/examples/cross-namespace-routing/README.md +++ b/examples/cross-namespace-routing/README.md @@ -9,18 +9,6 @@ in a different namespace from our HTTPRoutes. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. 
Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Cafe Application 1. Create the cafe namespace and cafe application: @@ -49,6 +37,15 @@ in a different namespace from our HTTPRoutes. kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell diff --git a/examples/grpc-routing/README.md b/examples/grpc-routing/README.md index d58ac43fd1..3f294978ce 100644 --- a/examples/grpc-routing/README.md +++ b/examples/grpc-routing/README.md @@ -9,18 +9,6 @@ to route traffic to that application using GRPCRoute resources. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Helloworld Application 1. Create the two helloworld Deployments and Services: @@ -60,7 +48,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f exact-method.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "exact"}' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -72,7 +69,7 @@ There are 3 options to configure gRPC routing. To access the application and tes } ``` -3. 
Clean up the Gateway and GRPCRoute resources: +1. Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f exact-method.yaml @@ -86,7 +83,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f hostname.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "bar server"}' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -132,7 +138,7 @@ There are 3 options to configure gRPC routing. To access the application and tes 2024/04/29 09:29:46 Received: foo bar server ``` -3. Clean up the Gateway and GRPCRoute resources: +1. Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f hostname.yaml @@ -146,7 +152,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f headers.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "version one"}' -H 'version: one' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -230,7 +245,7 @@ There are 3 options to configure gRPC routing. To access the application and tes 2024/04/29 09:33:26 Received: version two orange ``` -3. Clean up the Gateway and GRPCRoute resources: +1. 
Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f headers.yaml diff --git a/examples/helm/README.md b/examples/helm/README.md index 7d66f2ee4a..e6b6edfcc7 100644 --- a/examples/helm/README.md +++ b/examples/helm/README.md @@ -14,7 +14,6 @@ This directory contains examples of Helm charts that can be used to deploy NGINX The secret must be created in the same namespace as the NGINX Gateway Fabric deployment. - [Experimental](./experimental) - deploys NGINX Gateway Fabric with the Gateway API experimental features enabled and NGINX OSS as the data plane. - [Experimental with NGINX Plus](./experimental-nginx-plus) - deploys NGINX Gateway Fabric with the Gateway API experimental features enabled and NGINX Plus as the data plane. The image is pulled from the NGINX Plus Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. The secret must be created in the same namespace as the NGINX Gateway Fabric deployment. -- [AWS NLB](./aws-nlb) - deploys NGINX Gateway Fabric with NGINX OSS using a Service of type `LoadBalancer` to allocate an AWS Network Load Balancer (NLB). - [Azure](./azure) - deploys NGINX Gateway Fabric with NGINX OSS using a nodeSelector to deploy the gateway on Linux nodes in an Azure Kubernetes Service (AKS) cluster. - [NodePort](./nodeport) - deploys NGINX Gateway Fabric with NGINX OSS using a Service of type `NodePort` to expose the gateway on a specific port on each node. 
diff --git a/examples/helm/aws-nlb/values.yaml b/examples/helm/aws-nlb/values.yaml deleted file mode 100644 index b1ffc87974..0000000000 --- a/examples/helm/aws-nlb/values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -nginxGateway: - name: nginx-gateway -service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "external" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" diff --git a/examples/helm/azure/values.yaml b/examples/helm/azure/values.yaml index 3dbfc24256..ee6669108c 100644 --- a/examples/helm/azure/values.yaml +++ b/examples/helm/azure/values.yaml @@ -1,4 +1,8 @@ nginxGateway: name: nginx-gateway -nodeSelector: - kubernetes.io/os: linux + nodeSelector: + kubernetes.io/os: linux +nginx: + pod: + nodeSelector: + kubernetes.io/os: linux diff --git a/examples/helm/experimental-nginx-plus/values.yaml b/examples/helm/experimental-nginx-plus/values.yaml index 08469ce364..e1d854fd3a 100644 --- a/examples/helm/experimental-nginx-plus/values.yaml +++ b/examples/helm/experimental-nginx-plus/values.yaml @@ -7,6 +7,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/examples/helm/nginx-plus/values.yaml b/examples/helm/nginx-plus/values.yaml index b8b842d16a..0b85bfc51b 100644 --- a/examples/helm/nginx-plus/values.yaml +++ b/examples/helm/nginx-plus/values.yaml @@ -5,6 +5,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/examples/helm/nodeport/values.yaml b/examples/helm/nodeport/values.yaml index 17da6a8849..93318a7b96 100644 --- a/examples/helm/nodeport/values.yaml +++ b/examples/helm/nodeport/values.yaml @@ -1,4 +1,5 @@ nginxGateway: name: nginx-gateway -service: - type: NodePort +nginx: + service: + type: NodePort diff --git 
a/examples/helm/snippets-filters-nginx-plus/values.yaml b/examples/helm/snippets-filters-nginx-plus/values.yaml index 9cacfdb168..89cc0b59b4 100644 --- a/examples/helm/snippets-filters-nginx-plus/values.yaml +++ b/examples/helm/snippets-filters-nginx-plus/values.yaml @@ -7,6 +7,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/examples/https-termination/README.md b/examples/https-termination/README.md index 7d811babe1..8e7245e467 100644 --- a/examples/https-termination/README.md +++ b/examples/https-termination/README.md @@ -1,214 +1,3 @@ -# HTTPS Termination Example +# HTTPS Termination -In this example, we expand on the simple [cafe-example](../cafe-example) by adding HTTPS termination to our routes and -an HTTPS redirect from port 80 to 443. We will also show how you can use a ReferenceGrant to permit your Gateway to -reference a Secret in a different Namespace. - -## Running the Example - -## 1. Deploy NGINX Gateway Fabric - -1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. - -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the ports of NGINX Gateway Fabric: - - ```text - GW_HTTP_PORT= - GW_HTTPS_PORT= - ``` - -## 2. Deploy the Cafe Application - -1. Create the coffee and the tea Deployments and Services: - - ```shell - kubectl apply -f cafe.yaml - ``` - -1. Check that the Pods are running in the `default` namespace: - - ```shell - kubectl -n default get pods - ``` - - ```text - NAME READY STATUS RESTARTS AGE - coffee-6f4b79b975-2sb28 1/1 Running 0 12s - tea-6fb46d899f-fm7zr 1/1 Running 0 12s - ``` - -## 3. Configure HTTPS Termination and Routing - -1. 
Create the Namespace `certificate` and a Secret with a TLS certificate and key: - - ```shell - kubectl apply -f certificate-ns-and-cafe-secret.yaml - ``` - - The TLS certificate and key in this Secret are used to terminate the TLS connections for the cafe application. - > **Important**: This certificate and key are for demo purposes only. - -1. Create the ReferenceGrant: - - ```shell - kubectl apply -f reference-grant.yaml - ``` - - This ReferenceGrant allows all Gateways in the `default` namespace to reference the `cafe-secret` Secret in - the `certificate` Namespace. - -1. Create the Gateway resource: - - ```shell - kubectl apply -f gateway.yaml - ``` - - This [Gateway](./gateway.yaml) configures: - - `http` listener for HTTP traffic - - `https` listener for HTTPS traffic. It terminates TLS connections using the `cafe-secret` we created in step 1. - -1. Create the HTTPRoute resources: - - ```shell - kubectl apply -f cafe-routes.yaml - ``` - - To configure HTTPS termination for our cafe application, we will bind our `coffee` and `tea` HTTPRoutes to - the `https` listener in [cafe-routes.yaml](./cafe-routes.yaml) using - the [`parentReference`](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.ParentReference) - field: - - ```yaml - parentRefs: - - name: gateway - sectionName: https - ``` - - To configure an HTTPS redirect from port 80 to 443, we will bind the special `cafe-tls-redirect` HTTPRoute with - a [`HTTPRequestRedirectFilter`](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.HTTPRequestRedirectFilter) - to the `http` listener: - - ```yaml - parentRefs: - - name: gateway - sectionName: http - ``` - -## 4. Test the Application - -To access the application, we will use `curl` to send requests to the `coffee` and `tea` Services. First, we will access -the application over HTTP to test that the HTTPS redirect works. Then we will use HTTPS. 
- -### 4.1 Test HTTPS Redirect - -To test that NGINX sends an HTTPS redirect, we will send requests to the `coffee` and `tea` Services on HTTP port. We -will use curl's `--include` option to print the response headers (we are interested in the `Location` header). - -To get a redirect for coffee: - -```shell -curl --resolve cafe.example.com:$GW_HTTP_PORT:$GW_IP http://cafe.example.com:$GW_HTTP_PORT/coffee --include -``` - -```text -HTTP/1.1 302 Moved Temporarily -... -Location: https://cafe.example.com/coffee -... -``` - -To get a redirect for tea: - -```shell -curl --resolve cafe.example.com:$GW_HTTP_PORT:$GW_IP http://cafe.example.com:$GW_HTTP_PORT/tea --include -``` - -```text -HTTP/1.1 302 Moved Temporarily -... -Location: https://cafe.example.com/tea -... -``` - -### 4.2 Access Coffee and Tea - -Now we will access the application over HTTPS. Since our certificate is self-signed, we will use curl's `--insecure` -option to turn off certificate verification. - -To get coffee: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/coffee --insecure -``` - -```text -Server address: 10.12.0.18:80 -Server name: coffee-7586895968-r26zn -``` - -To get tea: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/tea --insecure -``` - -```text -Server address: 10.12.0.19:80 -Server name: tea-7cd44fcb4d-xfw2x -``` - -### 4.3 Remove the ReferenceGrant - -To restrict access to the `cafe-secret` in the `certificate` Namespace, we can delete the ReferenceGrant we created in -Step 3: - -```shell -kubectl delete -f reference-grant.yaml -``` - -Now, if we try to access the application over HTTPS, we will get a connection refused error: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/coffee --insecure -vvv -``` - -```text -... 
-curl: (7) Failed to connect to cafe.example.com port 443 after 0 ms: Connection refused -``` - - -You can also check the conditions of the Gateway `https` Listener to verify the that the reference is not permitted: - -```shell - kubectl describe gateway gateway -``` - -```text - Name: https - Conditions: - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: RefNotPermitted - Status: False - Type: Accepted - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: RefNotPermitted - Status: False - Type: ResolvedRefs - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: Invalid - Status: False - Type: Programmed -``` +This directory contains the YAML files used in the [HTTPS Termination](https://docs.nginx.com/nginx-gateway-fabric/how-to/traffic-management/https-termination/) guide. diff --git a/examples/traffic-splitting/README.md b/examples/traffic-splitting/README.md index 0479722cff..d3a07061b5 100644 --- a/examples/traffic-splitting/README.md +++ b/examples/traffic-splitting/README.md @@ -11,18 +11,6 @@ and `coffee-v2`. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Coffee Application 1. Create the Cafe Deployments and Services: @@ -51,6 +39,15 @@ and `coffee-v2`. 
kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell diff --git a/go.mod b/go.mod index 8c5212cca9..a3d470c3ec 100644 --- a/go.mod +++ b/go.mod @@ -3,21 +3,23 @@ module github.com/nginx/nginx-gateway-fabric go 1.24.2 require ( - github.com/go-kit/log v0.2.1 + github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 + github.com/google/uuid v1.6.0 + github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f github.com/nginx/telemetry-exporter v0.1.4 - github.com/nginxinc/nginx-plus-go-client v1.3.0 - github.com/nginxinc/nginx-prometheus-exporter v1.3.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.1 + github.com/prometheus/client_golang v1.22.0 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.uber.org/zap v1.27.0 + golang.org/x/text v0.24.0 + google.golang.org/grpc v1.72.0 + google.golang.org/protobuf v1.36.6 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apimachinery v0.32.3 @@ -28,15 +30,14 @@ require ( ) require ( + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect 
github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -49,12 +50,10 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -62,7 +61,8 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect @@ -78,14 +78,11 @@ require ( golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.32.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/grpc v1.72.0 // indirect - google.golang.org/protobuf 
v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 82275b5edb..373a6764ea 100644 --- a/go.sum +++ b/go.sum @@ -1,28 +1,50 @@ +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1 h1:4erM3WLgEG/HIBrpBDmRbs1puhd7p0z7kNXDuhHthwM= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1/go.mod h1:novQBstnxcGpfKf8qGRATqn1anQKwMJIbH5Q581jibU= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= 
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0= +github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 
v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -30,6 +52,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer 
v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -38,6 +62,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY= +github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -58,6 +84,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -66,69 +94,125 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= +github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c 
h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f h1:fSUAaR1AxmmbmGMRkvKGY2+LhuVpBp7tbBFLLgDMjNQ= +github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f/go.mod h1:O/31aKtii/mpiZmFGMcTNDoLtKzwTyTXOBMSRkMaPvs= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= -github.com/nginxinc/nginx-plus-go-client v1.3.0 h1:q/aeT4B5k0KLwWlefoBzfLfraBBvIKLuDg+lLFWAo4I= -github.com/nginxinc/nginx-plus-go-client v1.3.0/go.mod h1:n8OFLzrJulJ2fur28Cwa1Qp5DZNS2VicLV+Adt30LQ4= -github.com/nginxinc/nginx-prometheus-exporter v1.3.0 h1:1JtdxsZH0Uwhu1nL/j/QyOXytP5V5j68AEo2X+DFWb0= -github.com/nginxinc/nginx-prometheus-exporter v1.3.0/go.mod h1:hXoH+X6aIKSyQuO6QTIiPKH3eZyxqy/wW8GYiE3dflU= +github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o= +github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1/go.mod h1:He+1izxYxVVO5/C9ZTukwOpvkAx5eS19nRQgKXDhX5I= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.3 
h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE= +github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.36.0 h1:YpffyLuHtdp5EUsI5mT4sRw8GZhO/5ozyDT1xWGXt00= +github.com/testcontainers/testcontainers-go v0.36.0/go.mod h1:yk73GVJ0KUZIHUtFna6MO7QS144qYpoY8lEEtU9Hed0= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= @@ -139,8 +223,8 @@ go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/ go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= @@ -156,6 +240,10 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= @@ -213,8 +301,8 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= diff --git a/internal/framework/controller/index/pod.go b/internal/framework/controller/index/pod.go new file mode 100644 index 0000000000..2cd5cf6818 --- /dev/null +++ 
b/internal/framework/controller/index/pod.go @@ -0,0 +1,19 @@ +package index + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PodIPIndexFunc is a client.IndexerFunc that parses a Pod object and returns the PodIP. +// Used by the gRPC token validator for validating a connection from NGINX agent. +func PodIPIndexFunc(obj client.Object) []string { + pod, ok := obj.(*corev1.Pod) + if !ok { + panic(fmt.Sprintf("expected an Pod; got %T", obj)) + } + + return []string{pod.Status.PodIP} +} diff --git a/internal/framework/controller/index/pod_test.go b/internal/framework/controller/index/pod_test.go new file mode 100644 index 0000000000..e89c0492da --- /dev/null +++ b/internal/framework/controller/index/pod_test.go @@ -0,0 +1,53 @@ +package index + +import ( + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestPodIPIndexFunc(t *testing.T) { + t.Parallel() + testcases := []struct { + msg string + obj client.Object + expOutput []string + }{ + { + msg: "normal case", + obj: &corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + expOutput: []string{"1.2.3.4"}, + }, + { + msg: "empty status", + obj: &corev1.Pod{}, + expOutput: []string{""}, + }, + } + + for _, tc := range testcases { + t.Run(tc.msg, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + output := PodIPIndexFunc(tc.obj) + g.Expect(output).To(Equal(tc.expOutput)) + }) + } +} + +func TestPodIPIndexFuncPanics(t *testing.T) { + t.Parallel() + defer func() { + g := NewWithT(t) + g.Expect(recover()).ToNot(BeNil()) + }() + + PodIPIndexFunc(&corev1.Namespace{}) +} diff --git a/internal/framework/controller/labels.go b/internal/framework/controller/labels.go new file mode 100644 index 0000000000..79b6b55113 --- /dev/null +++ b/internal/framework/controller/labels.go @@ -0,0 +1,12 @@ +package controller + +// The following labels are added to each nginx resource 
created by the control plane. +const ( + GatewayLabel = "gateway.networking.k8s.io/gateway-name" + AppNameLabel = "app.kubernetes.io/name" + AppInstanceLabel = "app.kubernetes.io/instance" + AppManagedByLabel = "app.kubernetes.io/managed-by" +) + +// RestartedAnnotation is added to a Deployment or DaemonSet's PodSpec to trigger a rolling restart. +const RestartedAnnotation = "kubectl.kubernetes.io/restartedAt" diff --git a/internal/framework/controller/predicate/annotation.go b/internal/framework/controller/predicate/annotation.go index fdf1fd696f..c6c34585f6 100644 --- a/internal/framework/controller/predicate/annotation.go +++ b/internal/framework/controller/predicate/annotation.go @@ -1,8 +1,11 @@ package predicate import ( + appsv1 "k8s.io/api/apps/v1" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" ) // AnnotationPredicate implements a predicate function based on the Annotation. @@ -16,24 +19,62 @@ type AnnotationPredicate struct { } // Create filters CreateEvents based on the Annotation. -func (cp AnnotationPredicate) Create(e event.CreateEvent) bool { +func (ap AnnotationPredicate) Create(e event.CreateEvent) bool { if e.Object == nil { return false } - _, ok := e.Object.GetAnnotations()[cp.Annotation] + _, ok := e.Object.GetAnnotations()[ap.Annotation] return ok } // Update filters UpdateEvents based on the Annotation. 
-func (cp AnnotationPredicate) Update(e event.UpdateEvent) bool { +func (ap AnnotationPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil || e.ObjectNew == nil { // this case should not happen return false } - oldAnnotationVal := e.ObjectOld.GetAnnotations()[cp.Annotation] - newAnnotationVal := e.ObjectNew.GetAnnotations()[cp.Annotation] + oldAnnotationVal := e.ObjectOld.GetAnnotations()[ap.Annotation] + newAnnotationVal := e.ObjectNew.GetAnnotations()[ap.Annotation] return oldAnnotationVal != newAnnotationVal } + +// RestartDeploymentAnnotationPredicate skips update events if they are due to a rolling restart. +// This type of event is triggered by adding an annotation to the deployment's PodSpec. +// This is used by the provisioner to ensure it allows for rolling restarts of the nginx deployment +// without reverting the annotation and deleting the new pod(s). Otherwise, if a user changes +// the nginx deployment, we want to see that event so we can revert it back to the configuration +// that we expect it to have. +type RestartDeploymentAnnotationPredicate struct { + predicate.Funcs +} + +// Update filters UpdateEvents based on if the annotation is present or changed. 
+func (RestartDeploymentAnnotationPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + // this case should not happen + return false + } + + depOld, ok := e.ObjectOld.(*appsv1.Deployment) + if !ok { + return false + } + + depNew, ok := e.ObjectNew.(*appsv1.Deployment) + if !ok { + return false + } + + oldVal, oldExists := depOld.Spec.Template.Annotations[controller.RestartedAnnotation] + + if newVal, ok := depNew.Spec.Template.Annotations[controller.RestartedAnnotation]; ok { + if !oldExists || newVal != oldVal { + return false + } + } + + return true +} diff --git a/internal/framework/controller/predicate/annotation_test.go b/internal/framework/controller/predicate/annotation_test.go index 47cd762839..4ecc448b4d 100644 --- a/internal/framework/controller/predicate/annotation_test.go +++ b/internal/framework/controller/predicate/annotation_test.go @@ -4,9 +4,13 @@ import ( "testing" . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" ) func TestAnnotationPredicate_Create(t *testing.T) { @@ -222,3 +226,177 @@ func TestAnnotationPredicate_Update(t *testing.T) { }) } } + +func TestRestartDeploymentAnnotationPredicate_Update(t *testing.T) { + t.Parallel() + + tests := []struct { + event event.UpdateEvent + name string + expUpdate bool + }{ + { + name: "annotation added", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + 
controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "annotation changed", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "false", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "annotation removed", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + }, + }, + expUpdate: true, + }, + { + name: "annotation unchanged", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: true, + }, + { + name: "old object is nil", + event: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + 
controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "new object is nil", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: nil, + }, + expUpdate: false, + }, + { + name: "both objects are nil", + event: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: nil, + }, + expUpdate: false, + }, + } + + p := RestartDeploymentAnnotationPredicate{} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + update := p.Update(test.event) + g.Expect(update).To(Equal(test.expUpdate)) + }) + } +} diff --git a/internal/framework/controller/predicate/label.go b/internal/framework/controller/predicate/label.go new file mode 100644 index 0000000000..06d1d157a6 --- /dev/null +++ b/internal/framework/controller/predicate/label.go @@ -0,0 +1,18 @@ +package predicate + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8spredicate "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// NginxLabelPredicate returns a predicate that only matches resources with the nginx labels. 
+func NginxLabelPredicate(selector metav1.LabelSelector) k8spredicate.Predicate { + labelPredicate, err := k8spredicate.LabelSelectorPredicate(selector) + if err != nil { + panic(fmt.Sprintf("error creating label selector: %v", err)) + } + + return labelPredicate +} diff --git a/internal/framework/controller/predicate/secret.go b/internal/framework/controller/predicate/secret.go new file mode 100644 index 0000000000..0e28679d89 --- /dev/null +++ b/internal/framework/controller/predicate/secret.go @@ -0,0 +1,77 @@ +package predicate + +import ( + "slices" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// SecretNamePredicate implements a predicate function that returns true if the Secret matches the expected +// namespace and one of the expected names. +type SecretNamePredicate struct { + predicate.Funcs + Namespace string + SecretNames []string +} + +// Create filters CreateEvents based on the Secret name. +func (sp SecretNamePredicate) Create(e event.CreateEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Update filters UpdateEvents based on the Secret name. +func (sp SecretNamePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectNew == nil { + return false + } + + if secret, ok := e.ObjectNew.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Delete filters DeleteEvents based on the Secret name. +func (sp SecretNamePredicate) Delete(e event.DeleteEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Generic filters GenericEvents based on the Secret name. 
+func (sp SecretNamePredicate) Generic(e event.GenericEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +func secretMatches(secret *corev1.Secret, namespace string, names []string) bool { + if secret.GetNamespace() != namespace { + return false + } + + return slices.Contains(names, secret.GetName()) +} diff --git a/internal/framework/controller/predicate/secret_test.go b/internal/framework/controller/predicate/secret_test.go new file mode 100644 index 0000000000..a9574dbb8c --- /dev/null +++ b/internal/framework/controller/predicate/secret_test.go @@ -0,0 +1,194 @@ +package predicate + +import ( + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestSecretNamePredicate(t *testing.T) { + t.Parallel() + + pred := SecretNamePredicate{ + Namespace: "test-namespace", + SecretNames: []string{"secret1", "secret2"}, + } + + tests := []struct { + createEvent *event.CreateEvent + updateEvent *event.UpdateEvent + deleteEvent *event.DeleteEvent + genericEvent *event.GenericEvent + name string + expUpdate bool + }{ + { + name: "Create event with matching secret", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Create event with non-matching secret", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Create event with non-matching namespace", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, 
+ { + name: "Update event with matching secret", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret2", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Update event with non-matching secret", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Update event with non-matching namespace", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Delete event with matching secret", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Delete event with non-matching secret", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Delete event with non-matching namespace", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Generic event with matching secret", + genericEvent: &event.GenericEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Generic event with non-matching secret", + genericEvent: &event.GenericEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Generic event with non-matching namespace", + genericEvent: &event.GenericEvent{ + 
Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + var result bool + switch { + case test.createEvent != nil: + result = pred.Create(*test.createEvent) + case test.updateEvent != nil: + result = pred.Update(*test.updateEvent) + case test.deleteEvent != nil: + result = pred.Delete(*test.deleteEvent) + default: + result = pred.Generic(*test.genericEvent) + } + + g.Expect(test.expUpdate).To(Equal(result)) + }) + } +} diff --git a/internal/framework/controller/predicate/service.go b/internal/framework/controller/predicate/service.go index 04eea8f5d2..21e59e6ee0 100644 --- a/internal/framework/controller/predicate/service.go +++ b/internal/framework/controller/predicate/service.go @@ -2,9 +2,7 @@ package predicate import ( apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -65,54 +63,3 @@ func (ServicePortsChangedPredicate) Update(e event.UpdateEvent) bool { return len(newPortSet) > 0 } - -// GatewayServicePredicate implements predicate functions for this Pod's Service. -type GatewayServicePredicate struct { - predicate.Funcs - NSName types.NamespacedName -} - -// Update implements the default UpdateEvent filter for the Gateway Service. 
-func (gsp GatewayServicePredicate) Update(e event.UpdateEvent) bool { - if e.ObjectOld == nil { - return false - } - if e.ObjectNew == nil { - return false - } - - oldSvc, ok := e.ObjectOld.(*apiv1.Service) - if !ok { - return false - } - - newSvc, ok := e.ObjectNew.(*apiv1.Service) - if !ok { - return false - } - - if client.ObjectKeyFromObject(newSvc) != gsp.NSName { - return false - } - - if oldSvc.Spec.Type != newSvc.Spec.Type { - return true - } - - if newSvc.Spec.Type == apiv1.ServiceTypeLoadBalancer { - oldIngress := oldSvc.Status.LoadBalancer.Ingress - newIngress := newSvc.Status.LoadBalancer.Ingress - - if len(oldIngress) != len(newIngress) { - return true - } - - for i, ingress := range oldIngress { - if ingress.IP != newIngress[i].IP || ingress.Hostname != newIngress[i].Hostname { - return true - } - } - } - - return false -} diff --git a/internal/framework/controller/predicate/service_test.go b/internal/framework/controller/predicate/service_test.go index 98176774ec..fcb4aa694f 100644 --- a/internal/framework/controller/predicate/service_test.go +++ b/internal/framework/controller/predicate/service_test.go @@ -5,8 +5,6 @@ import ( . 
"github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -245,223 +243,7 @@ func TestServicePortsChangedPredicate(t *testing.T) { t.Parallel() g := NewWithT(t) - p := GatewayServicePredicate{} - - g.Expect(p.Delete(event.DeleteEvent{Object: &v1.Service{}})).To(BeTrue()) - g.Expect(p.Create(event.CreateEvent{Object: &v1.Service{}})).To(BeTrue()) - g.Expect(p.Generic(event.GenericEvent{Object: &v1.Service{}})).To(BeTrue()) -} - -func TestGatewayServicePredicate_Update(t *testing.T) { - t.Parallel() - testcases := []struct { - objectOld client.Object - objectNew client.Object - msg string - expUpdate bool - }{ - { - msg: "nil objectOld", - objectOld: nil, - objectNew: &v1.Service{}, - expUpdate: false, - }, - { - msg: "nil objectNew", - objectOld: &v1.Service{}, - objectNew: nil, - expUpdate: false, - }, - { - msg: "non-Service objectOld", - objectOld: &v1.Namespace{}, - objectNew: &v1.Service{}, - expUpdate: false, - }, - { - msg: "non-Service objectNew", - objectOld: &v1.Service{}, - objectNew: &v1.Namespace{}, - expUpdate: false, - }, - { - msg: "Service not watched", - objectOld: &v1.Service{}, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "not-watched", - }, - }, - expUpdate: false, - }, - { - msg: "something irrelevant changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.4", - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - ClusterIP: "5.6.7.8", - }, - }, - expUpdate: false, - }, - { - msg: "type changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - 
Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, - }, - expUpdate: true, - }, - { - msg: "ingress changed length", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - { - IP: "5.6.7.8", - }, - }, - }, - }, - }, - expUpdate: true, - }, - { - msg: "IP address changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - }, - expUpdate: true, - }, - { - msg: "Hostname changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - Hostname: "one", - }, - }, - }, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - Hostname: "two", - }, - }, - }, - }, - }, - expUpdate: true, - }, - } 
- - p := GatewayServicePredicate{NSName: types.NamespacedName{Namespace: "nginx-gateway", Name: "nginx"}} - - for _, tc := range testcases { - t.Run(tc.msg, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - update := p.Update(event.UpdateEvent{ - ObjectOld: tc.objectOld, - ObjectNew: tc.objectNew, - }) - - g.Expect(update).To(Equal(tc.expUpdate)) - }) - } -} - -func TestGatewayServicePredicate(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - p := GatewayServicePredicate{} + p := ServicePortsChangedPredicate{} g.Expect(p.Delete(event.DeleteEvent{Object: &v1.Service{}})).To(BeTrue()) g.Expect(p.Create(event.CreateEvent{Object: &v1.Service{}})).To(BeTrue()) diff --git a/internal/framework/controller/register.go b/internal/framework/controller/register.go index c76db1f577..557438da98 100644 --- a/internal/framework/controller/register.go +++ b/internal/framework/controller/register.go @@ -96,7 +96,7 @@ func Register( } for field, indexerFunc := range cfg.fieldIndices { - if err := addIndex( + if err := AddIndex( ctx, mgr.GetFieldIndexer(), objectType, @@ -136,7 +136,7 @@ func Register( return nil } -func addIndex( +func AddIndex( ctx context.Context, indexer client.FieldIndexer, objectType ngftypes.ObjectType, diff --git a/internal/framework/controller/resource.go b/internal/framework/controller/resource.go new file mode 100644 index 0000000000..464d2ee90f --- /dev/null +++ b/internal/framework/controller/resource.go @@ -0,0 +1,22 @@ +package controller + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// CreateNginxResourceName creates the base resource name for all nginx resources +// created by the control plane. +func CreateNginxResourceName(prefix, suffix string) string { + return fmt.Sprintf("%s-%s", prefix, suffix) +} + +// ObjectMetaToNamespacedName converts ObjectMeta to NamespacedName. 
+func ObjectMetaToNamespacedName(meta metav1.ObjectMeta) types.NamespacedName { + return types.NamespacedName{ + Namespace: meta.Namespace, + Name: meta.Name, + } +} diff --git a/internal/framework/file/file.go b/internal/framework/file/file.go new file mode 100644 index 0000000000..555731ba45 --- /dev/null +++ b/internal/framework/file/file.go @@ -0,0 +1,134 @@ +package file + +import ( + "errors" + "fmt" + "io" + "os" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" +) + +//go:generate go tool counterfeiter -generate + +const ( + // RegularFileModeInt defines the default file mode for regular files as an integer. + RegularFileModeInt = 0o644 + // RegularFileMode defines the default file mode for regular files. + RegularFileMode = "0644" + // secretFileMode defines the default file mode for files with secrets as an integer. + secretFileModeInt = 0o640 + // SecretFileMode defines the default file mode for files with secrets. + SecretFileMode = "0640" +) + +// Type is the type of File. +type Type int + +func (t Type) String() string { + switch t { + case TypeRegular: + return "Regular" + case TypeSecret: + return "Secret" + default: + return fmt.Sprintf("Unknown Type %d", t) + } +} + +const ( + // TypeRegular is the type for regular configuration files. + TypeRegular Type = iota + // TypeSecret is the type for secret files. + TypeSecret +) + +// File is a file that is part of NGINX configuration to be written to the file system. +type File struct { + Path string + Content []byte + Type Type +} + +//counterfeiter:generate . OSFileManager + +// OSFileManager is an interface that exposes File I/O operations. +type OSFileManager interface { + // Create file at the provided filepath. + Create(name string) (*os.File, error) + // Chmod sets the mode of the file. + Chmod(file *os.File, mode os.FileMode) error + // Write writes contents to the file. + Write(file *os.File, contents []byte) error + // Open opens the file. 
+ Open(name string) (*os.File, error) + // Copy copies from src to dst. + Copy(dst io.Writer, src io.Reader) error +} + +func Write(fileMgr OSFileManager, file File) error { + ensureType(file.Type) + + f, err := fileMgr.Create(file.Path) + if err != nil { + return fmt.Errorf("failed to create file %q: %w", file.Path, err) + } + + var resultErr error + + defer func() { + if err := f.Close(); err != nil { + resultErr = errors.Join(resultErr, fmt.Errorf("failed to close file %q: %w", file.Path, err)) + } + }() + + switch file.Type { + case TypeRegular: + if err := fileMgr.Chmod(f, RegularFileModeInt); err != nil { + resultErr = fmt.Errorf( + "failed to set file mode to %#o for %q: %w", RegularFileModeInt, file.Path, err) + return resultErr + } + case TypeSecret: + if err := fileMgr.Chmod(f, secretFileModeInt); err != nil { + resultErr = fmt.Errorf("failed to set file mode to %#o for %q: %w", secretFileModeInt, file.Path, err) + return resultErr + } + default: + panic(fmt.Sprintf("unknown file type %d", file.Type)) + } + + if err := fileMgr.Write(f, file.Content); err != nil { + resultErr = fmt.Errorf("failed to write file %q: %w", file.Path, err) + return resultErr + } + + return resultErr +} + +func ensureType(fileType Type) { + if fileType != TypeRegular && fileType != TypeSecret { + panic(fmt.Sprintf("unknown file type %d", fileType)) + } +} + +// Convert an agent File to an internal File type. 
+func Convert(agentFile agent.File) File { + if agentFile.Meta == nil { + return File{} + } + + var t Type + switch agentFile.Meta.Permissions { + case RegularFileMode: + t = TypeRegular + case SecretFileMode: + t = TypeSecret + } + + return File{ + Content: agentFile.Contents, + Path: agentFile.Meta.Name, + Type: t, + } +} diff --git a/internal/mode/static/nginx/file/file_suite_test.go b/internal/framework/file/file_suite_test.go similarity index 100% rename from internal/mode/static/nginx/file/file_suite_test.go rename to internal/framework/file/file_suite_test.go diff --git a/internal/framework/file/file_test.go b/internal/framework/file/file_test.go new file mode 100644 index 0000000000..c67678bbad --- /dev/null +++ b/internal/framework/file/file_test.go @@ -0,0 +1,188 @@ +package file_test + +import ( + "errors" + "os" + "path/filepath" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file/filefakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" +) + +var _ = Describe("Write files", Ordered, func() { + var ( + mgr file.OSFileManager + tmpDir string + regular1, regular2, secret file.File + ) + + ensureFiles := func(files []file.File) { + entries, err := os.ReadDir(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(entries).Should(HaveLen(len(files))) + + entriesMap := make(map[string]os.DirEntry) + for _, entry := range entries { + entriesMap[entry.Name()] = entry + } + + for _, f := range files { + _, ok := entriesMap[filepath.Base(f.Path)] + Expect(ok).Should(BeTrue()) + + info, err := os.Stat(f.Path) + Expect(err).ToNot(HaveOccurred()) + + Expect(info.IsDir()).To(BeFalse()) + + if f.Type == file.TypeRegular { + Expect(info.Mode()).To(Equal(os.FileMode(0o644))) + } else { + Expect(info.Mode()).To(Equal(os.FileMode(0o640))) + } + + bytes, err 
:= os.ReadFile(f.Path) + Expect(err).ToNot(HaveOccurred()) + Expect(bytes).To(Equal(f.Content)) + } + } + + BeforeAll(func() { + mgr = file.NewStdLibOSFileManager() + tmpDir = GinkgoT().TempDir() + + regular1 = file.File{ + Type: file.TypeRegular, + Path: filepath.Join(tmpDir, "regular-1.conf"), + Content: []byte("regular-1"), + } + regular2 = file.File{ + Type: file.TypeRegular, + Path: filepath.Join(tmpDir, "regular-2.conf"), + Content: []byte("regular-2"), + } + secret = file.File{ + Type: file.TypeSecret, + Path: filepath.Join(tmpDir, "secret.conf"), + Content: []byte("secret"), + } + }) + + It("should write files", func() { + files := []file.File{regular1, regular2, secret} + + for _, f := range files { + Expect(file.Write(mgr, f)).To(Succeed()) + } + + ensureFiles(files) + }) + + When("file type is not supported", func() { + It("should panic", func() { + mgr = file.NewStdLibOSFileManager() + + f := file.File{ + Type: 123, + Path: "unsupported.conf", + } + + replace := func() { + _ = file.Write(mgr, f) + } + + Expect(replace).Should(Panic()) + }) + }) + + Describe("Edge cases with IO errors", func() { + var ( + files = []file.File{ + { + Type: file.TypeRegular, + Path: "regular.conf", + Content: []byte("regular"), + }, + { + Type: file.TypeSecret, + Path: "secret.conf", + Content: []byte("secret"), + }, + } + errTest = errors.New("test error") + ) + + DescribeTable( + "should return error on file IO error", + func(fakeOSMgr *filefakes.FakeOSFileManager) { + mgr := fakeOSMgr + + for _, f := range files { + err := file.Write(mgr, f) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(errTest)) + } + }, + Entry( + "Create", + &filefakes.FakeOSFileManager{ + CreateStub: func(_ string) (*os.File, error) { + return nil, errTest + }, + }, + ), + Entry( + "Chmod", + &filefakes.FakeOSFileManager{ + ChmodStub: func(_ *os.File, _ os.FileMode) error { + return errTest + }, + }, + ), + Entry( + "Write", + &filefakes.FakeOSFileManager{ + WriteStub: func(_ *os.File, 
_ []byte) error { + return errTest + }, + }, + ), + ) + }) + + It("converts agent files to internal files", func() { + agentFile := agent.File{ + Contents: []byte("file contents"), + Meta: &pb.FileMeta{ + Name: "regular-file", + Permissions: file.RegularFileMode, + }, + } + expFile := file.File{ + Path: "regular-file", + Content: []byte("file contents"), + Type: file.TypeRegular, + } + + secretAgentFile := agent.File{ + Contents: []byte("secret contents"), + Meta: &pb.FileMeta{ + Name: "secret-file", + Permissions: file.SecretFileMode, + }, + } + expSecretFile := file.File{ + Path: "secret-file", + Content: []byte("secret contents"), + Type: file.TypeSecret, + } + + Expect(file.Convert(agentFile)).To(Equal(expFile)) + Expect(file.Convert(secretAgentFile)).To(Equal(expSecretFile)) + }) +}) diff --git a/internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go b/internal/framework/file/filefakes/fake_osfile_manager.go similarity index 72% rename from internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go rename to internal/framework/file/filefakes/fake_osfile_manager.go index 9f63e0a025..52bf2fb1ad 100644 --- a/internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go +++ b/internal/framework/file/filefakes/fake_osfile_manager.go @@ -3,11 +3,10 @@ package filefakes import ( "io" - "io/fs" "os" "sync" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" ) type FakeOSFileManager struct { @@ -61,30 +60,6 @@ type FakeOSFileManager struct { result1 *os.File result2 error } - ReadDirStub func(string) ([]fs.DirEntry, error) - readDirMutex sync.RWMutex - readDirArgsForCall []struct { - arg1 string - } - readDirReturns struct { - result1 []fs.DirEntry - result2 error - } - readDirReturnsOnCall map[int]struct { - result1 []fs.DirEntry - result2 error - } - RemoveStub func(string) error - removeMutex sync.RWMutex - removeArgsForCall []struct { - arg1 string - } - 
removeReturns struct { - result1 error - } - removeReturnsOnCall map[int]struct { - result1 error - } WriteStub func(*os.File, []byte) error writeMutex sync.RWMutex writeArgsForCall []struct { @@ -353,131 +328,6 @@ func (fake *FakeOSFileManager) OpenReturnsOnCall(i int, result1 *os.File, result }{result1, result2} } -func (fake *FakeOSFileManager) ReadDir(arg1 string) ([]fs.DirEntry, error) { - fake.readDirMutex.Lock() - ret, specificReturn := fake.readDirReturnsOnCall[len(fake.readDirArgsForCall)] - fake.readDirArgsForCall = append(fake.readDirArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadDirStub - fakeReturns := fake.readDirReturns - fake.recordInvocation("ReadDir", []interface{}{arg1}) - fake.readDirMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeOSFileManager) ReadDirCallCount() int { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - return len(fake.readDirArgsForCall) -} - -func (fake *FakeOSFileManager) ReadDirCalls(stub func(string) ([]fs.DirEntry, error)) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = stub -} - -func (fake *FakeOSFileManager) ReadDirArgsForCall(i int) string { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - argsForCall := fake.readDirArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeOSFileManager) ReadDirReturns(result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - fake.readDirReturns = struct { - result1 []fs.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeOSFileManager) ReadDirReturnsOnCall(i int, result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - if fake.readDirReturnsOnCall == nil { - fake.readDirReturnsOnCall = 
make(map[int]struct { - result1 []fs.DirEntry - result2 error - }) - } - fake.readDirReturnsOnCall[i] = struct { - result1 []fs.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeOSFileManager) Remove(arg1 string) error { - fake.removeMutex.Lock() - ret, specificReturn := fake.removeReturnsOnCall[len(fake.removeArgsForCall)] - fake.removeArgsForCall = append(fake.removeArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveStub - fakeReturns := fake.removeReturns - fake.recordInvocation("Remove", []interface{}{arg1}) - fake.removeMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeOSFileManager) RemoveCallCount() int { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - return len(fake.removeArgsForCall) -} - -func (fake *FakeOSFileManager) RemoveCalls(stub func(string) error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = stub -} - -func (fake *FakeOSFileManager) RemoveArgsForCall(i int) string { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - argsForCall := fake.removeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeOSFileManager) RemoveReturns(result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - fake.removeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeOSFileManager) RemoveReturnsOnCall(i int, result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - if fake.removeReturnsOnCall == nil { - fake.removeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.removeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeOSFileManager) Write(arg1 *os.File, arg2 []byte) error { var arg2Copy []byte if arg2 != nil { @@ -556,10 +406,6 @@ func (fake *FakeOSFileManager) Invocations() map[string][][]interface{} { 
defer fake.createMutex.RUnlock() fake.openMutex.RLock() defer fake.openMutex.RUnlock() - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() fake.writeMutex.RLock() defer fake.writeMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} diff --git a/internal/mode/static/nginx/file/os_filemanager.go b/internal/framework/file/os_filemanager.go similarity index 100% rename from internal/mode/static/nginx/file/os_filemanager.go rename to internal/framework/file/os_filemanager.go diff --git a/internal/framework/runnables/runnables.go b/internal/framework/runnables/runnables.go index d960475008..8304c326c0 100644 --- a/internal/framework/runnables/runnables.go +++ b/internal/framework/runnables/runnables.go @@ -34,29 +34,34 @@ func (r *LeaderOrNonLeader) NeedLeaderElection() bool { return false } -// EnableAfterBecameLeader is a Runnable that will call the enable function when the current instance becomes +// CallFunctionsAfterBecameLeader is a Runnable that will call the given functions when the current instance becomes // the leader. -type EnableAfterBecameLeader struct { - enable func(context.Context) +type CallFunctionsAfterBecameLeader struct { + enableFunctions []func(context.Context) } var ( - _ manager.LeaderElectionRunnable = &EnableAfterBecameLeader{} - _ manager.Runnable = &EnableAfterBecameLeader{} + _ manager.LeaderElectionRunnable = &CallFunctionsAfterBecameLeader{} + _ manager.Runnable = &CallFunctionsAfterBecameLeader{} ) -// NewEnableAfterBecameLeader creates a new EnableAfterBecameLeader Runnable. -func NewEnableAfterBecameLeader(enable func(context.Context)) *EnableAfterBecameLeader { - return &EnableAfterBecameLeader{ - enable: enable, +// NewCallFunctionsAfterBecameLeader creates a new CallFunctionsAfterBecameLeader Runnable. 
+func NewCallFunctionsAfterBecameLeader( + enableFunctions []func(context.Context), +) *CallFunctionsAfterBecameLeader { + return &CallFunctionsAfterBecameLeader{ + enableFunctions: enableFunctions, } } -func (j *EnableAfterBecameLeader) Start(ctx context.Context) error { - j.enable(ctx) +func (j *CallFunctionsAfterBecameLeader) Start(ctx context.Context) error { + for _, f := range j.enableFunctions { + f(ctx) + } + return nil } -func (j *EnableAfterBecameLeader) NeedLeaderElection() bool { +func (j *CallFunctionsAfterBecameLeader) NeedLeaderElection() bool { return true } diff --git a/internal/framework/runnables/runnables_test.go b/internal/framework/runnables/runnables_test.go index 9f34d9ccba..6da01a0236 100644 --- a/internal/framework/runnables/runnables_test.go +++ b/internal/framework/runnables/runnables_test.go @@ -23,19 +23,22 @@ func TestLeaderOrNonLeader(t *testing.T) { g.Expect(leaderOrNonLeader.NeedLeaderElection()).To(BeFalse()) } -func TestEnableAfterBecameLeader(t *testing.T) { +func TestCallFunctionsAfterBecameLeader(t *testing.T) { t.Parallel() - enabled := false - enableAfterBecameLeader := NewEnableAfterBecameLeader(func(_ context.Context) { - enabled = true + statusUpdaterEnabled := false + provisionerEnabled := false + + callFunctionsAfterBecameLeader := NewCallFunctionsAfterBecameLeader([]func(ctx context.Context){ + func(_ context.Context) { statusUpdaterEnabled = true }, + func(_ context.Context) { provisionerEnabled = true }, }) g := NewWithT(t) - g.Expect(enableAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - g.Expect(enabled).To(BeFalse()) + g.Expect(callFunctionsAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - err := enableAfterBecameLeader.Start(context.Background()) + err := callFunctionsAfterBecameLeader.Start(context.Background()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(enabled).To(BeTrue()) + g.Expect(statusUpdaterEnabled).To(BeTrue()) + g.Expect(provisionerEnabled).To(BeTrue()) } diff --git 
a/internal/mode/provisioner/deployment.go b/internal/mode/provisioner/deployment.go deleted file mode 100644 index 6de1579595..0000000000 --- a/internal/mode/provisioner/deployment.go +++ /dev/null @@ -1,43 +0,0 @@ -package provisioner - -import ( - "fmt" - "strings" - - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// prepareDeployment prepares a new the static mode Deployment based on the YAML manifest. -// It will use the specified id to set unique parts of the deployment, so it must be unique among all Deployments for -// Gateways. -// It will configure the Deployment to use the Gateway with the given NamespacedName. -func prepareDeployment(depYAML []byte, id string, gwNsName types.NamespacedName) (*v1.Deployment, error) { - dep := &v1.Deployment{} - if err := yaml.Unmarshal(depYAML, dep); err != nil { - return nil, fmt.Errorf("failed to unmarshal deployment: %w", err) - } - - dep.Name = id - dep.Spec.Selector.MatchLabels["app"] = id - dep.Spec.Template.Labels["app"] = id - - finalArgs := []string{ - "--gateway=" + gwNsName.String(), - "--update-gatewayclass-status=false", - } - - for _, arg := range dep.Spec.Template.Spec.Containers[0].Args { - if strings.Contains(arg, "leader-election-lock-name") { - lockNameArg := "--leader-election-lock-name=" + gwNsName.Name - finalArgs = append(finalArgs, lockNameArg) - } else { - finalArgs = append(finalArgs, arg) - } - } - - dep.Spec.Template.Spec.Containers[0].Args = finalArgs - - return dep, nil -} diff --git a/internal/mode/provisioner/doc.go b/internal/mode/provisioner/doc.go deleted file mode 100644 index 589ab68527..0000000000 --- a/internal/mode/provisioner/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package provisioner contains all the packages that relate to the provisioner-mode implementation of NGF. 
-Provisioner-mode implements data plane provisioning for NGINX Gateway Fabric (NGF): it creates an NGF static mode -Deployment for each Gateway that belongs to the provisioner GatewayClass. -*/ -package provisioner diff --git a/internal/mode/provisioner/handler.go b/internal/mode/provisioner/handler.go deleted file mode 100644 index b31145bd37..0000000000 --- a/internal/mode/provisioner/handler.go +++ /dev/null @@ -1,186 +0,0 @@ -package provisioner - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" -) - -type timeNowFunc func() metav1.Time - -// eventHandler ensures each Gateway for the specific GatewayClass has a corresponding Deployment -// of NGF configured to use that specific Gateway. -// -// eventHandler implements events.Handler interface. 
-type eventHandler struct { - gcName string - store *store - - // provisions maps NamespacedName of Gateway to its corresponding Deployment - provisions map[types.NamespacedName]*v1.Deployment - - statusUpdater *status.Updater - k8sClient client.Client - timeNow timeNowFunc - - staticModeDeploymentYAML []byte - - gatewayNextID int64 -} - -func newEventHandler( - gcName string, - statusUpdater *status.Updater, - k8sClient client.Client, - staticModeDeploymentYAML []byte, - timeNow timeNowFunc, -) *eventHandler { - return &eventHandler{ - store: newStore(), - provisions: make(map[types.NamespacedName]*v1.Deployment), - statusUpdater: statusUpdater, - gcName: gcName, - k8sClient: k8sClient, - staticModeDeploymentYAML: staticModeDeploymentYAML, - gatewayNextID: 1, - timeNow: timeNow, - } -} - -func (h *eventHandler) setGatewayClassStatuses(ctx context.Context) { - var reqs []status.UpdateRequest - - var gcExists bool - - for nsname, gc := range h.store.gatewayClasses { - // The order of conditions matters. Default conditions are added first so that any additional conditions will - // override them, which is ensured by DeduplicateConditions. - conds := conditions.NewDefaultGatewayClassConditions() - - if gc.Name == h.gcName { - gcExists = true - } else { - conds = append(conds, conditions.NewGatewayClassConflict()) - } - - // We ignore the boolean return value here because the provisioner only sets status, - // it does not generate config. - supportedVersionConds, _ := gatewayclass.ValidateCRDVersions(h.store.crdMetadata) - conds = append(conds, supportedVersionConds...) 
- - reqs = append(reqs, status.UpdateRequest{ - NsName: nsname, - ResourceType: &gatewayv1.GatewayClass{}, - Setter: func(obj client.Object) bool { - gc := helpers.MustCastObject[*gatewayv1.GatewayClass](obj) - - gcs := gatewayv1.GatewayClassStatus{ - Conditions: conditions.ConvertConditions(conditions.DeduplicateConditions(conds), gc.Generation, h.timeNow()), - } - - if status.ConditionsEqual(gc.Status.Conditions, gcs.Conditions) { - return false - } - - gc.Status = gcs - - return true - }, - }) - } - - if !gcExists { - panic(fmt.Errorf("GatewayClass %s must exist", h.gcName)) - } - - h.statusUpdater.Update(ctx, reqs...) -} - -func (h *eventHandler) ensureDeploymentsMatchGateways(ctx context.Context, logger logr.Logger) { - var gwsWithoutDeps, removedGwsWithDeps []types.NamespacedName - - for nsname, gw := range h.store.gateways { - if string(gw.Spec.GatewayClassName) != h.gcName { - continue - } - if _, exist := h.provisions[nsname]; exist { - continue - } - - gwsWithoutDeps = append(gwsWithoutDeps, nsname) - } - - for nsname := range h.provisions { - if _, exist := h.store.gateways[nsname]; exist { - continue - } - - removedGwsWithDeps = append(removedGwsWithDeps, nsname) - } - - // Create new deployments - - for _, nsname := range gwsWithoutDeps { - deployment, err := prepareDeployment(h.staticModeDeploymentYAML, h.generateDeploymentID(), nsname) - if err != nil { - panic(fmt.Errorf("failed to prepare deployment: %w", err)) - } - - if err = h.k8sClient.Create(ctx, deployment); err != nil { - panic(fmt.Errorf("failed to create deployment: %w", err)) - } - - h.provisions[nsname] = deployment - - logger.Info( - "Created deployment", - "deployment", client.ObjectKeyFromObject(deployment), - "gateway", nsname, - ) - } - - // Remove unnecessary deployments - - for _, nsname := range removedGwsWithDeps { - deployment := h.provisions[nsname] - - if err := h.k8sClient.Delete(ctx, deployment); err != nil { - panic(fmt.Errorf("failed to delete deployment: %w", err)) - } - 
- delete(h.provisions, nsname) - - logger.Info( - "Deleted deployment", - "deployment", client.ObjectKeyFromObject(deployment), - "gateway", nsname, - ) - } -} - -func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, batch events.EventBatch) { - h.store.update(batch) - h.setGatewayClassStatuses(ctx) - h.ensureDeploymentsMatchGateways(ctx, logger) -} - -func (h *eventHandler) generateDeploymentID() string { - // This approach will break if the provisioner is restarted, because the existing Gateways might get - // IDs different from the previous replica of the provisioner. - id := h.gatewayNextID - h.gatewayNextID++ - - return fmt.Sprintf("nginx-gateway-%d", id) -} diff --git a/internal/mode/provisioner/handler_test.go b/internal/mode/provisioner/handler_test.go deleted file mode 100644 index 97c870179e..0000000000 --- a/internal/mode/provisioner/handler_test.go +++ /dev/null @@ -1,570 +0,0 @@ -package provisioner - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - v1 "k8s.io/api/apps/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - . 
"github.com/onsi/gomega" - - embeddedfiles "github.com/nginx/nginx-gateway-fabric" - "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" -) - -var _ = Describe("handler", func() { - const ( - gcName = "test-gc" - ) - var ( - handler *eventHandler - - statusUpdater *status.Updater - k8sclient client.Client - crd *metav1.PartialObjectMetadata - gc *gatewayv1.GatewayClass - - fakeTimeNow timeNowFunc - ) - - BeforeEach(OncePerOrdered, func() { - scheme := runtime.NewScheme() - - Expect(gatewayv1.Install(scheme)).Should(Succeed()) - Expect(v1.AddToScheme(scheme)).Should(Succeed()) - Expect(apiext.AddToScheme(scheme)).Should(Succeed()) - - k8sclient = fake.NewClientBuilder(). - WithScheme(scheme). - WithStatusSubresource( - &gatewayv1.Gateway{}, - &gatewayv1.GatewayClass{}, - ). 
- Build() - - fakeTime := helpers.PrepareTimeForFakeClient(metav1.Now()) - fakeTimeNow = func() metav1.Time { - return fakeTime - } - - statusUpdater = status.NewUpdater(k8sclient, logr.Discard()) - - // Add GatewayClass CRD to the cluster - crd = &metav1.PartialObjectMetadata{ - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "gatewayclasses.gateway.networking.k8s.io", - Annotations: map[string]string{ - gatewayclass.BundleVersionAnnotation: gatewayclass.SupportedVersion, - }, - }, - } - - err := k8sclient.Create(context.Background(), crd) - Expect(err).ToNot(HaveOccurred()) - - gc = &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: gcName, - }, - } - }) - - createGateway := func(gwNsName types.NamespacedName) *gatewayv1.Gateway { - return &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: gwNsName.Namespace, - Name: gwNsName.Name, - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: gcName, - }, - } - } - - itShouldUpsertGatewayClass := func() { - // Add GatewayClass to the cluster - - err := k8sclient.Create(context.Background(), gc) - Expect(err).ToNot(HaveOccurred()) - - // UpsertGatewayClass and CRD - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: gc, - }, - &events.UpsertEvent{ - Resource: crd, - }, - } - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - // Ensure GatewayClass is accepted - - clusterGc := &gatewayv1.GatewayClass{} - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(gc), clusterGc) - - Expect(err).ToNot(HaveOccurred()) - - expectedConditions := []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "Accepted", - Message: "GatewayClass is accepted", - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - 
Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "SupportedVersion", - Message: "Gateway API CRD versions are supported", - }, - } - - Expect(clusterGc.Status.Conditions).To(Equal(expectedConditions)) - } - - itShouldUpsertGateway := func(gwNsName types.NamespacedName, seqNumber int64) { - batch := []interface{}{ - &events.UpsertEvent{ - Resource: createGateway(gwNsName), - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - depNsName := types.NamespacedName{ - Namespace: "nginx-gateway", - Name: fmt.Sprintf("nginx-gateway-%d", seqNumber), - } - - dep := &v1.Deployment{} - err := k8sclient.Get(context.Background(), depNsName, dep) - - Expect(err).ToNot(HaveOccurred()) - - Expect(dep.ObjectMeta.Namespace).To(Equal("nginx-gateway")) - Expect(dep.ObjectMeta.Name).To(Equal(depNsName.Name)) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement("static-mode")) - expectedGwFlag := fmt.Sprintf("--gateway=%s", gwNsName.String()) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement(expectedGwFlag)) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement("--update-gatewayclass-status=false")) - expectedLockFlag := fmt.Sprintf("--leader-election-lock-name=%s", gwNsName.Name) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement(expectedLockFlag)) - } - - itShouldUpsertCRD := func(version string, accepted bool) { - updatedCRD := crd - updatedCRD.Annotations[gatewayclass.BundleVersionAnnotation] = version - - err := k8sclient.Update(context.Background(), updatedCRD) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: updatedCRD, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - updatedGC := &gatewayv1.GatewayClass{} - - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(gc), updatedGC) - Expect(err).ToNot(HaveOccurred()) - - var 
expConds []metav1.Condition - if !accepted { - expConds = []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not supported. "+ - "Please install version %s", gatewayclass.SupportedVersion), - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not supported. "+ - "Please install version %s", gatewayclass.SupportedVersion), - }, - } - } else { - expConds = []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonAccepted), - Message: "GatewayClass is accepted", - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not recommended. 
"+ - "Recommended version is %s", gatewayclass.SupportedVersion), - }, - } - } - - Expect(updatedGC.Status.Conditions).To(Equal(expConds)) - } - - itShouldPanicWhenUpsertingGateway := func(gwNsName types.NamespacedName) { - batch := []interface{}{ - &events.UpsertEvent{ - Resource: createGateway(gwNsName), - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - } - - Describe("Core cases", Ordered, func() { - var gwNsName1, gwNsName2 types.NamespacedName - - BeforeAll(func() { - gwNsName1 = types.NamespacedName{ - Namespace: "test-ns-1", - Name: "test-gw-1", - } - gwNsName2 = types.NamespacedName{ - Namespace: "test-ns-2", - Name: "test-gw-2", - } - - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - embeddedfiles.StaticModeDeploymentYAML, - fakeTimeNow, - ) - }) - - When("upserting GatewayClass", func() { - It("should make GatewayClass Accepted", func() { - itShouldUpsertGatewayClass() - }) - }) - - When("upserting first Gateway", func() { - It("should create first Deployment", func() { - itShouldUpsertGateway(gwNsName1, 1) - }) - }) - - When("upserting first Gateway again", func() { - It("must retain Deployment", func() { - itShouldUpsertGateway(gwNsName1, 1) - }) - }) - - When("upserting second Gateway", func() { - It("should create second Deployment", func() { - itShouldUpsertGateway(gwNsName2, 2) - }) - }) - - When("deleting first Gateway", func() { - It("should remove first Deployment", func() { - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName1, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - deps := &v1.DeploymentList{} - - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(HaveLen(1)) - Expect(deps.Items[0].ObjectMeta.Name).To(Equal("nginx-gateway-2")) - }) - }) - - When("deleting second Gateway", 
func() { - It("should remove second Deployment", func() { - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName2, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - deps := &v1.DeploymentList{} - - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(BeEmpty()) - }) - }) - - When("upserting Gateway for a different GatewayClass", func() { - It("should not create Deployment", func() { - gw := &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-gw-3", - Namespace: "test-ns-3", - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: "some-class", - }, - } - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: gw, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - deps := &v1.DeploymentList{} - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(BeEmpty()) - }) - }) - - When("upserting GatewayClass that is not set in command-line argument", func() { - It("should set the proper status if this controller is referenced", func() { - newGC := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "unknown-gc", - }, - Spec: gatewayv1.GatewayClassSpec{ - ControllerName: "test.example.com", - }, - } - - err := k8sclient.Create(context.Background(), newGC) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: newGC, - }, - &events.UpsertEvent{ - Resource: crd, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - unknownGC := &gatewayv1.GatewayClass{} - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(newGC), unknownGC) - Expect(err).ToNot(HaveOccurred()) - - expectedConditions := []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionTrue, - 
ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "SupportedVersion", - Message: "Gateway API CRD versions are supported", - }, - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(conditions.GatewayClassReasonGatewayClassConflict), - Message: conditions.GatewayClassMessageGatewayClassConflict, - }, - } - Expect(unknownGC.Status.Conditions).To(Equal(expectedConditions)) - }) - }) - - When("upserting Gateway API CRD that is not a supported major version", func() { - It("should set the SupportedVersion and Accepted statuses to false on GatewayClass", func() { - itShouldUpsertCRD("v99.0.0", false /* accepted */) - }) - }) - - When("upserting Gateway API CRD that is not a supported minor version", func() { - It("should set the SupportedVersion status to false and Accepted status to true on GatewayClass", func() { - itShouldUpsertCRD("1.99.0", true /* accepted */) - }) - }) - }) - - Describe("Edge cases", func() { - var gwNsName types.NamespacedName - - BeforeEach(func() { - gwNsName = types.NamespacedName{ - Namespace: "test-ns", - Name: "test-gw", - } - - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - embeddedfiles.StaticModeDeploymentYAML, - fakeTimeNow, - ) - }) - - DescribeTable("Edge cases for events", - func(e interface{}) { - batch := []interface{}{e} - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }, - Entry("should panic for an unknown event type", - &struct{}{}), - Entry("should panic for an unknown type of resource in upsert event", - &events.UpsertEvent{ - Resource: &gatewayv1.HTTPRoute{}, - }), - Entry("should panic for an unknown type of resource in delete event", - &events.DeleteEvent{ - Type: &gatewayv1.HTTPRoute{}, - }), - ) - - When("upserting Gateway when GatewayClass doesn't exist", func() { - 
It("should panic", func() { - itShouldPanicWhenUpsertingGateway(gwNsName) - }) - }) - - When("upserting Gateway when Deployment can't be created", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - - // Create a deployment so that the Handler will fail to create it because it already exists. - - dep := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx-gateway-1", - }, - } - - err := k8sclient.Create(context.Background(), dep) - Expect(err).ToNot(HaveOccurred()) - - itShouldPanicWhenUpsertingGateway(gwNsName) - }) - }) - - When("deleting Gateway when Deployment can't be deleted", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - itShouldUpsertGateway(gwNsName, 1) - - // Delete the deployment so that the Handler will fail to delete it because it doesn't exist. - - dep := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx-gateway-1", - }, - } - - err := k8sclient.Delete(context.Background(), dep) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName, - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }) - }) - - When("deleting GatewayClass", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.GatewayClass{}, - NamespacedName: types.NamespacedName{ - Name: gcName, - }, - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }) - }) - - When("upserting Gateway with broken static Deployment YAML", func() { - It("it should panic", func() { - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - []byte("broken YAML"), - fakeTimeNow, - ) - - itShouldUpsertGatewayClass() - 
itShouldPanicWhenUpsertingGateway(types.NamespacedName{Namespace: "test-ns", Name: "test-gw"}) - }) - }) - }) -}) diff --git a/internal/mode/provisioner/manager.go b/internal/mode/provisioner/manager.go deleted file mode 100644 index bb2b93b6e5..0000000000 --- a/internal/mode/provisioner/manager.go +++ /dev/null @@ -1,152 +0,0 @@ -package provisioner - -import ( - "fmt" - - "github.com/go-logr/logr" - v1 "k8s.io/api/apps/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - ctlr "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - embeddedfiles "github.com/nginx/nginx-gateway-fabric" - "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" - "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/predicate" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" - ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" -) - -// Config is configuration for the provisioner mode. -type Config struct { - Logger logr.Logger - GatewayClassName string - GatewayCtlrName string -} - -// StartManager starts a Manager for the provisioner mode, which provisions -// a Deployment of NGF (static mode) for each Gateway of the provisioner GatewayClass. -// -// The provisioner mode is introduced to allow running Gateway API conformance tests for NGF, which expects -// an independent data plane instance being provisioned for each Gateway. 
-// -// The provisioner mode is not intended to be used in production (in the short term), as it lacks support for -// many important features. See https://github.com/nginx/nginx-gateway-fabric/issues/634 for more details. -func StartManager(cfg Config) error { - scheme := runtime.NewScheme() - utilruntime.Must(gatewayv1.Install(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) - utilruntime.Must(apiext.AddToScheme(scheme)) - - options := manager.Options{ - Scheme: scheme, - Logger: cfg.Logger, - } - clusterCfg := ctlr.GetConfigOrDie() - - mgr, err := manager.New(clusterCfg, options) - if err != nil { - return fmt.Errorf("cannot build runtime manager: %w", err) - } - - crdWithGVK := apiext.CustomResourceDefinition{} - crdWithGVK.SetGroupVersionKind( - schema.GroupVersionKind{Group: apiext.GroupName, Version: "v1", Kind: "CustomResourceDefinition"}, - ) - - // Note: for any new object type or a change to the existing one, - // make sure to also update firstBatchPreparer creation below - controllerRegCfgs := []struct { - objectType ngftypes.ObjectType - options []controller.Option - }{ - { - objectType: &gatewayv1.GatewayClass{}, - options: []controller.Option{ - controller.WithK8sPredicate(predicate.GatewayClassPredicate{ControllerName: cfg.GatewayCtlrName}), - }, - }, - { - objectType: &gatewayv1.Gateway{}, - }, - { - objectType: &crdWithGVK, - options: []controller.Option{ - controller.WithOnlyMetadata(), - controller.WithK8sPredicate( - predicate.AnnotationPredicate{Annotation: gatewayclass.BundleVersionAnnotation}, - ), - }, - }, - } - - ctx := ctlr.SetupSignalHandler() - eventCh := make(chan interface{}) - - for _, regCfg := range controllerRegCfgs { - if err := controller.Register( - ctx, - regCfg.objectType, - regCfg.objectType.GetObjectKind().GroupVersionKind().Kind, - mgr, - eventCh, - regCfg.options..., - ); err != nil { - return fmt.Errorf("cannot register controller for %T: %w", regCfg.objectType, err) - } - } - - partialObjectMetadataList := 
&metav1.PartialObjectMetadataList{} - partialObjectMetadataList.SetGroupVersionKind( - schema.GroupVersionKind{ - Group: apiext.GroupName, - Version: "v1", - Kind: "CustomResourceDefinition", - }, - ) - - firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( - mgr.GetCache(), - []client.Object{ - &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: cfg.GatewayClassName}}, - }, - []client.ObjectList{ - &gatewayv1.GatewayList{}, - partialObjectMetadataList, - }, - ) - - statusUpdater := status.NewUpdater( - mgr.GetClient(), - cfg.Logger.WithName("statusUpdater"), - ) - - handler := newEventHandler( - cfg.GatewayClassName, - statusUpdater, - mgr.GetClient(), - embeddedfiles.StaticModeDeploymentYAML, - metav1.Now, - ) - - eventLoop := events.NewEventLoop( - eventCh, - cfg.Logger.WithName("eventLoop"), - handler, - firstBatchPreparer, - ) - - if err := mgr.Add(eventLoop); err != nil { - return fmt.Errorf("cannot register event loop: %w", err) - } - - cfg.Logger.Info("Starting manager") - return mgr.Start(ctx) -} diff --git a/internal/mode/provisioner/provisioner_suite_test.go b/internal/mode/provisioner/provisioner_suite_test.go deleted file mode 100644 index 1435a2230e..0000000000 --- a/internal/mode/provisioner/provisioner_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package provisioner - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -func TestProvisioner(t *testing.T) { - t.Parallel() - RegisterFailHandler(Fail) - RunSpecs(t, "Provisioner Suite") -} diff --git a/internal/mode/provisioner/store.go b/internal/mode/provisioner/store.go deleted file mode 100644 index ebc6afcc17..0000000000 --- a/internal/mode/provisioner/store.go +++ /dev/null @@ -1,58 +0,0 @@ -package provisioner - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - v1 "sigs.k8s.io/gateway-api/apis/v1" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" -) - -// store stores the cluster state needed by the provisioner and allows to update it from the events. -type store struct { - gatewayClasses map[types.NamespacedName]*v1.GatewayClass - gateways map[types.NamespacedName]*v1.Gateway - crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata -} - -func newStore() *store { - return &store{ - gatewayClasses: make(map[types.NamespacedName]*v1.GatewayClass), - gateways: make(map[types.NamespacedName]*v1.Gateway), - crdMetadata: make(map[types.NamespacedName]*metav1.PartialObjectMetadata), - } -} - -func (s *store) update(batch events.EventBatch) { - for _, event := range batch { - switch e := event.(type) { - case *events.UpsertEvent: - switch obj := e.Resource.(type) { - case *v1.GatewayClass: - s.gatewayClasses[client.ObjectKeyFromObject(obj)] = obj - case *v1.Gateway: - s.gateways[client.ObjectKeyFromObject(obj)] = obj - case *metav1.PartialObjectMetadata: - s.crdMetadata[client.ObjectKeyFromObject(obj)] = obj - default: - panic(fmt.Errorf("unknown resource type %T", e.Resource)) - } - case *events.DeleteEvent: - switch e.Type.(type) { - case *v1.GatewayClass: - delete(s.gatewayClasses, e.NamespacedName) - case *v1.Gateway: - delete(s.gateways, e.NamespacedName) - case *metav1.PartialObjectMetadata: - delete(s.crdMetadata, e.NamespacedName) - default: - 
panic(fmt.Errorf("unknown resource type %T", e.Type)) - } - default: - panic(fmt.Errorf("unknown event type %T", e)) - } - } -} diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go index 82b4238836..25630eb8f0 100644 --- a/internal/mode/static/config/config.go +++ b/internal/mode/static/config/config.go @@ -5,23 +5,19 @@ import ( "github.com/go-logr/logr" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" ) +const DefaultNginxMetricsPort = int32(9113) + type Config struct { // AtomicLevel is an atomically changeable, dynamic logging level. AtomicLevel zap.AtomicLevel // UsageReportConfig specifies the NGINX Plus usage reporting configuration. UsageReportConfig UsageReportConfig - // Version is the running NGF version. - Version string // ImageSource is the source of the NGINX Gateway image. ImageSource string // Flags contains the NGF command-line flag names and values. Flags Flags - // GatewayNsName is the namespaced name of a Gateway resource that the Gateway will use. - // The Gateway will ignore all other Gateway resources. - GatewayNsName *types.NamespacedName // GatewayPodConfig contains information about this Pod. GatewayPodConfig GatewayPodConfig // Logger is the Zap Logger used by all components. @@ -32,6 +28,12 @@ type Config struct { ConfigName string // GatewayClassName is the name of the GatewayClass resource that the Gateway will use. GatewayClassName string + // AgentTLSSecretName is the name of the TLS Secret used by NGINX Agent to communicate with the control plane. + AgentTLSSecretName string + // NGINXSCCName is the name of the SecurityContextConstraints for the NGINX Pods. Only applicable in OpenShift. + NGINXSCCName string + // NginxDockerSecretNames are the names of any Docker registry Secrets for the NGINX container. + NginxDockerSecretNames []string // LeaderElection contains the configuration for leader election. 
LeaderElection LeaderElectionConfig // ProductTelemetryConfig contains the configuration for collecting product telemetry. @@ -40,8 +42,6 @@ type Config struct { MetricsConfig MetricsConfig // HealthConfig specifies the health probe config. HealthConfig HealthConfig - // UpdateGatewayClassStatus enables updating the status of the GatewayClass resource. - UpdateGatewayClassStatus bool // Plus indicates whether NGINX Plus is being used. Plus bool // ExperimentalFeatures indicates if experimental features are enabled. @@ -52,8 +52,6 @@ type Config struct { // GatewayPodConfig contains information about this Pod. type GatewayPodConfig struct { - // PodIP is the IP address of this Pod. - PodIP string // ServiceName is the name of the Service that fronts this Pod. ServiceName string // Namespace is the namespace of this Pod. @@ -62,6 +60,13 @@ type GatewayPodConfig struct { Name string // UID is the UID of the Pod. UID string + // InstanceName is the name used in the instance label. + // Generally this will be the name of the Helm release. + InstanceName string + // Version is the running NGF version. + Version string + // Image is the image path of the Pod. + Image string } // MetricsConfig specifies the metrics config. 
diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 54dde7ade8..2e2fe6e9c2 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -4,27 +4,29 @@ import ( "context" "errors" "fmt" + "strings" "sync" "time" "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" frameworkStatus "github.com/nginx/nginx-gateway-fabric/internal/framework/status" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ngxConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -38,12 +40,13 @@ type handlerMetricsCollector interface { // eventHandlerConfig holds configuration parameters for eventHandlerImpl. type eventHandlerConfig struct { - // nginxFileMgr is the file Manager for nginx. 
- nginxFileMgr file.Manager + ctx context.Context + // nginxUpdater updates nginx configuration using the NGINX agent. + nginxUpdater agent.NginxUpdater + // nginxProvisioner handles provisioning and deprovisioning nginx resources. + nginxProvisioner provisioner.Provisioner // metricsCollector collects metrics for this controller. metricsCollector handlerMetricsCollector - // nginxRuntimeMgr manages nginx runtime. - nginxRuntimeMgr runtime.Manager // statusUpdater updates statuses on Kubernetes resources. statusUpdater frameworkStatus.GroupUpdater // processor is the state ChangeProcessor. @@ -62,16 +65,22 @@ type eventHandlerConfig struct { eventRecorder record.EventRecorder // deployCtxCollector collects the deployment context for N+ licensing deployCtxCollector licensing.Collector - // nginxConfiguredOnStartChecker sets the health of the Pod to Ready once we've written out our initial config. - nginxConfiguredOnStartChecker *nginxConfiguredOnStartChecker + // graphBuiltHealthChecker sets the health of the Pod to Ready once we've built our initial graph. + graphBuiltHealthChecker *graphBuiltHealthChecker + // statusQueue contains updates when the handler should write statuses. + statusQueue *status.Queue + // nginxDeployments contains a map of all nginx Deployments, and data about them. + nginxDeployments *agent.DeploymentStore + // logger is the logger for the event handler. + logger logr.Logger // gatewayPodConfig contains information about this Pod. gatewayPodConfig ngfConfig.GatewayPodConfig // controlConfigNSName is the NamespacedName of the NginxGateway config for this controller. controlConfigNSName types.NamespacedName // gatewayCtlrName is the name of the NGF controller. gatewayCtlrName string - // updateGatewayClassStatus enables updating the status of the GatewayClass resource. - updateGatewayClassStatus bool + // gatewayClassName is the name of the GatewayClass. + gatewayClassName string // plus is whether or not we are running NGINX Plus. 
plus bool } @@ -101,25 +110,21 @@ type objectFilter struct { // (3) Updating control plane configuration. // (4) Tracks the NGINX Plus usage reporting Secret (if applicable). type eventHandlerImpl struct { - // latestConfiguration is the latest Configuration generation. - latestConfiguration *dataplane.Configuration + // latestConfigurations are the latest Configuration generation for each Gateway tree. + latestConfigurations map[types.NamespacedName]*dataplane.Configuration // objectFilters contains all created objectFilters, with the key being a filterKey objectFilters map[filterKey]objectFilter - latestReloadResult status.NginxReloadResult - cfg eventHandlerConfig lock sync.Mutex - - // version is the current version number of the nginx config. - version int } // newEventHandlerImpl creates a new eventHandlerImpl. func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl { handler := &eventHandlerImpl{ - cfg: cfg, + cfg: cfg, + latestConfigurations: make(map[types.NamespacedName]*dataplane.Configuration), } handler.objectFilters = map[filterKey]objectFilter{ @@ -128,20 +133,10 @@ func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl { upsert: handler.nginxGatewayCRDUpsert, delete: handler.nginxGatewayCRDDelete, }, - // NGF-fronting Service - objectFilterKey( - &v1.Service{}, - types.NamespacedName{ - Name: handler.cfg.gatewayPodConfig.ServiceName, - Namespace: handler.cfg.gatewayPodConfig.Namespace, - }, - ): { - upsert: handler.nginxGatewayServiceUpsert, - delete: handler.nginxGatewayServiceDelete, - captureChangeInGraph: true, - }, } + go handler.waitForStatusUpdates(cfg.ctx) + return handler } @@ -162,82 +157,188 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.parseAndCaptureEvent(ctx, logger, event) } - changeType, gr := h.cfg.processor.Process() + gr := h.cfg.processor.Process() - var err error - switch changeType { - case state.NoChange: - logger.Info("Handling events didn't result into NGINX 
configuration changes") - if !h.cfg.nginxConfiguredOnStartChecker.ready && h.cfg.nginxConfiguredOnStartChecker.firstBatchError == nil { - h.cfg.nginxConfiguredOnStartChecker.setAsReady() - } + // Once we've processed resources on startup and built our first graph, mark the Pod as ready. + if !h.cfg.graphBuiltHealthChecker.ready { + h.cfg.graphBuiltHealthChecker.setAsReady() + } + + h.sendNginxConfig(ctx, logger, gr) +} + +// enable is called when the pod becomes leader to ensure the provisioner has +// the latest configuration. +func (h *eventHandlerImpl) enable(ctx context.Context) { + h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph()) +} + +func (h *eventHandlerImpl) sendNginxConfig(ctx context.Context, logger logr.Logger, gr *graph.Graph) { + if gr == nil { return - case state.EndpointsOnlyChange: - h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) - depCtx, getErr := h.getDeploymentContext(ctx) - if getErr != nil { - logger.Error(getErr, "error getting deployment context for usage reporting") + } + + if len(gr.Gateways) == 0 { + // still need to update GatewayClass status + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, } - cfg.DeploymentContext = depCtx + h.cfg.statusQueue.Enqueue(obj) + return + } + + for _, gw := range gr.Gateways { + go func() { + if err := h.cfg.nginxProvisioner.RegisterGateway(ctx, gw, gw.DeploymentName.Name); err != nil { + logger.Error(err, "error from provisioner") + } + }() - h.setLatestConfiguration(&cfg) + if !gw.Valid { + obj := &status.QueueObject{ + Deployment: gw.DeploymentName, + UpdateType: status.UpdateAll, + } + h.cfg.statusQueue.Enqueue(obj) + return + } - if h.cfg.plus { - err = h.updateUpstreamServers(cfg) - } else { - err = h.updateNginxConf(ctx, cfg) + stopCh := make(chan struct{}) + deployment := h.cfg.nginxDeployments.GetOrStore(ctx, gw.DeploymentName, stopCh) + if deployment == nil { + panic("expected deployment, got nil") } - 
case state.ClusterStateChange: - h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) + + cfg := dataplane.BuildConfiguration(ctx, gr, gw, h.cfg.serviceResolver, h.cfg.plus) depCtx, getErr := h.getDeploymentContext(ctx) if getErr != nil { logger.Error(getErr, "error getting deployment context for usage reporting") } cfg.DeploymentContext = depCtx - h.setLatestConfiguration(&cfg) + h.setLatestConfiguration(gw, &cfg) + + deployment.FileLock.Lock() + h.updateNginxConf(deployment, cfg) + deployment.FileLock.Unlock() + + configErr := deployment.GetLatestConfigError() + upstreamErr := deployment.GetLatestUpstreamError() + err := errors.Join(configErr, upstreamErr) - err = h.updateNginxConf(ctx, cfg) + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, + Error: err, + Deployment: gw.DeploymentName, + } + h.cfg.statusQueue.Enqueue(obj) } +} - var nginxReloadRes status.NginxReloadResult - if err != nil { - logger.Error(err, "Failed to update NGINX configuration") - nginxReloadRes.Error = err - if !h.cfg.nginxConfiguredOnStartChecker.ready { - h.cfg.nginxConfiguredOnStartChecker.firstBatchError = err +func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { + for { + item := h.cfg.statusQueue.Dequeue(ctx) + if item == nil { + return } - } else { - logger.Info("NGINX configuration was successfully updated") - if !h.cfg.nginxConfiguredOnStartChecker.ready { - h.cfg.nginxConfiguredOnStartChecker.setAsReady() + + gr := h.cfg.processor.GetLatestGraph() + if gr == nil { + continue } - } - h.latestReloadResult = nginxReloadRes + var nginxReloadRes graph.NginxReloadResult + var gw *graph.Gateway + if item.Deployment.Name != "" { + gwNSName := types.NamespacedName{ + Namespace: item.Deployment.Namespace, + Name: strings.TrimSuffix(item.Deployment.Name, fmt.Sprintf("-%s", h.cfg.gatewayClassName)), + } - h.updateStatuses(ctx, logger, gr) -} + gw = gr.Gateways[gwNSName] + } -func (h *eventHandlerImpl) 
updateStatuses(ctx context.Context, logger logr.Logger, gr *graph.Graph) { - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig) - if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") + switch { + case item.Error != nil: + h.cfg.logger.Error(item.Error, "Failed to update NGINX configuration") + nginxReloadRes.Error = item.Error + case gw != nil: + h.cfg.logger.Info("NGINX configuration was successfully updated") + } + if gw != nil { + gw.LatestReloadResult = nginxReloadRes + } + + switch item.UpdateType { + case status.UpdateAll: + h.updateStatuses(ctx, gr, gw) + case status.UpdateGateway: + if gw == nil { + continue + } + + gwAddresses, err := getGatewayAddresses( + ctx, + h.cfg.k8sClient, + item.GatewayService, + gw, + h.cfg.gatewayClassName, + ) + if err != nil { + msg := "error getting Gateway Service IP address" + h.cfg.logger.Error(err, msg) + h.cfg.eventRecorder.Eventf( + item.GatewayService, + v1.EventTypeWarning, + "GetServiceIPFailed", + msg+": %s", + err.Error(), + ) + continue + } + + transitionTime := metav1.Now() + + gatewayStatuses := status.PrepareGatewayRequests( + gw, + transitionTime, + gwAddresses, + gw.LatestReloadResult, + ) + h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) + default: + panic(fmt.Sprintf("unknown event type %T", item.UpdateType)) + } } +} +func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph, gw *graph.Gateway) { transitionTime := metav1.Now() + gcReqs := status.PrepareGatewayClassRequests(gr.GatewayClass, gr.IgnoredGatewayClasses, transitionTime) - var gcReqs []frameworkStatus.UpdateRequest - if h.cfg.updateGatewayClassStatus { - gcReqs = status.PrepareGatewayClassRequests(gr.GatewayClass, gr.IgnoredGatewayClasses, transitionTime) + if gw == nil { + h.cfg.statusUpdater.UpdateGroup(ctx, groupAllExceptGateways, gcReqs...) 
+ return } + + gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, gw, h.cfg.gatewayClassName) + if err != nil { + msg := "error getting Gateway Service IP address" + h.cfg.logger.Error(err, msg) + h.cfg.eventRecorder.Eventf( + &v1.Service{}, + v1.EventTypeWarning, + "GetServiceIPFailed", + msg+": %s", + err.Error(), + ) + } + routeReqs := status.PrepareRouteRequests( gr.L4Routes, gr.Routes, transitionTime, - h.latestReloadResult, + gw.LatestReloadResult, h.cfg.gatewayCtlrName, ) @@ -265,11 +366,10 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logge // We put Gateway status updates separately from the rest of the statuses because we want to be able // to update them separately from the rest of the graph whenever the public IP of NGF changes. gwReqs := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, + gw, transitionTime, gwAddresses, - h.latestReloadResult, + gw.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gwReqs...) } @@ -305,131 +405,16 @@ func (h *eventHandlerImpl) parseAndCaptureEvent(ctx context.Context, logger logr // updateNginxConf updates nginx conf files and reloads nginx. func (h *eventHandlerImpl) updateNginxConf( - ctx context.Context, + deployment *agent.Deployment, conf dataplane.Configuration, -) error { +) { files := h.cfg.generator.Generate(conf) - if err := h.cfg.nginxFileMgr.ReplaceFiles(files); err != nil { - return fmt.Errorf("failed to replace NGINX configuration files: %w", err) - } - - if err := h.cfg.nginxRuntimeMgr.Reload(ctx, conf.Version); err != nil { - return fmt.Errorf("failed to reload NGINX: %w", err) - } + h.cfg.nginxUpdater.UpdateConfig(deployment, files) // If using NGINX Plus, update upstream servers using the API. 
- if err := h.updateUpstreamServers(conf); err != nil { - return fmt.Errorf("failed to update upstream servers: %w", err) + if h.cfg.plus { + h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, conf) } - - return nil -} - -// updateUpstreamServers determines which servers have changed and uses the NGINX Plus API to update them. -// Only applicable when using NGINX Plus. -func (h *eventHandlerImpl) updateUpstreamServers(conf dataplane.Configuration) error { - if !h.cfg.plus { - return nil - } - - prevUpstreams, prevStreamUpstreams, err := h.cfg.nginxRuntimeMgr.GetUpstreams() - if err != nil { - return fmt.Errorf("failed to get upstreams from API: %w", err) - } - - type upstream struct { - name string - servers []ngxclient.UpstreamServer - } - var upstreams []upstream - - for _, u := range conf.Upstreams { - confUpstream := upstream{ - name: u.Name, - servers: ngxConfig.ConvertEndpoints(u.Endpoints), - } - - if u, ok := prevUpstreams[confUpstream.name]; ok { - if !serversEqual(confUpstream.servers, u.Peers) { - upstreams = append(upstreams, confUpstream) - } - } - } - - type streamUpstream struct { - name string - servers []ngxclient.StreamUpstreamServer - } - var streamUpstreams []streamUpstream - - for _, u := range conf.StreamUpstreams { - confUpstream := streamUpstream{ - name: u.Name, - servers: ngxConfig.ConvertStreamEndpoints(u.Endpoints), - } - - if u, ok := prevStreamUpstreams[confUpstream.name]; ok { - if !serversEqual(confUpstream.servers, u.Peers) { - streamUpstreams = append(streamUpstreams, confUpstream) - } - } - } - - var updateErr error - for _, upstream := range upstreams { - if err := h.cfg.nginxRuntimeMgr.UpdateHTTPServers(upstream.name, upstream.servers); err != nil { - updateErr = errors.Join(updateErr, fmt.Errorf( - "couldn't update upstream %q via the API: %w", upstream.name, err)) - } - } - - for _, upstream := range streamUpstreams { - if err := h.cfg.nginxRuntimeMgr.UpdateStreamServers(upstream.name, upstream.servers); err != nil { - 
updateErr = errors.Join(updateErr, fmt.Errorf( - "couldn't update stream upstream %q via the API: %w", upstream.name, err)) - } - } - - return updateErr -} - -// serversEqual accepts lists of either UpstreamServer/Peer or StreamUpstreamServer/StreamPeer and determines -// if the server names within these lists are equal. -func serversEqual[ - upstreamServer ngxclient.UpstreamServer | ngxclient.StreamUpstreamServer, - peer ngxclient.Peer | ngxclient.StreamPeer, -](newServers []upstreamServer, oldServers []peer) bool { - if len(newServers) != len(oldServers) { - return false - } - - getServerVal := func(T any) string { - var server string - switch t := T.(type) { - case ngxclient.UpstreamServer: - server = t.Server - case ngxclient.StreamUpstreamServer: - server = t.Server - case ngxclient.Peer: - server = t.Server - case ngxclient.StreamPeer: - server = t.Server - } - return server - } - - diff := make(map[string]struct{}, len(newServers)) - for _, s := range newServers { - diff[getServerVal(s)] = struct{}{} - } - - for _, s := range oldServers { - if _, ok := diff[getServerVal(s)]; !ok { - return false - } - } - - return true } // updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status @@ -477,27 +462,42 @@ func getGatewayAddresses( ctx context.Context, k8sClient client.Client, svc *v1.Service, - podConfig ngfConfig.GatewayPodConfig, + gateway *graph.Gateway, + gatewayClassName string, ) ([]gatewayv1.GatewayStatusAddress, error) { - podAddress := []gatewayv1.GatewayStatusAddress{ - { - Type: helpers.GetPointer(gatewayv1.IPAddressType), - Value: podConfig.PodIP, - }, + if gateway == nil { + return nil, nil } var gwSvc v1.Service if svc == nil { - key := types.NamespacedName{Name: podConfig.ServiceName, Namespace: podConfig.Namespace} - if err := k8sClient.Get(ctx, key, &gwSvc); err != nil { - return podAddress, fmt.Errorf("error finding Service for Gateway: %w", err) + svcName := 
controller.CreateNginxResourceName(gateway.Source.GetName(), gatewayClassName) + key := types.NamespacedName{Name: svcName, Namespace: gateway.Source.GetNamespace()} + + pollCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := wait.PollUntilContextCancel( + pollCtx, + 500*time.Millisecond, + true, /* poll immediately */ + func(ctx context.Context) (bool, error) { + if err := k8sClient.Get(ctx, key, &gwSvc); err != nil { + return false, nil //nolint:nilerr // need to retry without returning error + } + + return true, nil + }, + ); err != nil { + return nil, fmt.Errorf("error finding Service %s for Gateway: %w", svcName, err) } } else { gwSvc = *svc } var addresses, hostnames []string - if gwSvc.Spec.Type == v1.ServiceTypeLoadBalancer { + switch gwSvc.Spec.Type { + case v1.ServiceTypeLoadBalancer: for _, ingress := range gwSvc.Status.LoadBalancer.Ingress { if ingress.IP != "" { addresses = append(addresses, ingress.IP) @@ -505,6 +505,8 @@ func getGatewayAddresses( hostnames = append(hostnames, ingress.Hostname) } } + default: + addresses = append(addresses, gwSvc.Spec.ClusterIP) } gwAddresses := make([]gatewayv1.GatewayStatusAddress, 0, len(addresses)+len(hostnames)) @@ -537,19 +539,28 @@ func (h *eventHandlerImpl) getDeploymentContext(ctx context.Context) (dataplane. } // GetLatestConfiguration gets the latest configuration. -func (h *eventHandlerImpl) GetLatestConfiguration() *dataplane.Configuration { +func (h *eventHandlerImpl) GetLatestConfiguration() []*dataplane.Configuration { h.lock.Lock() defer h.lock.Unlock() - return h.latestConfiguration + configs := make([]*dataplane.Configuration, 0, len(h.latestConfigurations)) + for _, cfg := range h.latestConfigurations { + configs = append(configs, cfg) + } + + return configs } // setLatestConfiguration sets the latest configuration. 
-func (h *eventHandlerImpl) setLatestConfiguration(cfg *dataplane.Configuration) { +func (h *eventHandlerImpl) setLatestConfiguration(gateway *graph.Gateway, cfg *dataplane.Configuration) { + if gateway == nil || gateway.Source == nil { + return + } + h.lock.Lock() defer h.lock.Unlock() - h.latestConfiguration = cfg + h.latestConfigurations[client.ObjectKeyFromObject(gateway.Source)] = cfg } func objectFilterKey(obj client.Object, nsName types.NamespacedName) filterKey { @@ -581,56 +592,3 @@ func (h *eventHandlerImpl) nginxGatewayCRDDelete( ) { h.updateControlPlaneAndSetStatus(ctx, logger, nil) } - -func (h *eventHandlerImpl) nginxGatewayServiceUpsert(ctx context.Context, logger logr.Logger, obj client.Object) { - svc, ok := obj.(*v1.Service) - if !ok { - panic(fmt.Errorf("obj type mismatch: got %T, expected %T", svc, &v1.Service{})) - } - - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, svc, h.cfg.gatewayPodConfig) - if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") - } - - gr := h.cfg.processor.GetLatestGraph() - if gr == nil { - return - } - - transitionTime := metav1.Now() - gatewayStatuses := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, - transitionTime, - gwAddresses, - h.latestReloadResult, - ) - h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) 
-} - -func (h *eventHandlerImpl) nginxGatewayServiceDelete( - ctx context.Context, - logger logr.Logger, - _ types.NamespacedName, -) { - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig) - if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") - } - - gr := h.cfg.processor.GetLatestGraph() - if gr == nil { - return - } - - transitionTime := metav1.Now() - gatewayStatuses := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, - transitionTime, - gwAddresses, - h.latestReloadResult, - ) - h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) -} diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 5774487650..59ed8d8d2a 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -3,9 +3,10 @@ package static import ( "context" "errors" + "time" "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "go.uber.org/zap" @@ -19,111 +20,147 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/status/statusfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing/licensingfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime/runtimefakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/provisionerfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/statefakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) var _ = Describe("eventHandler", func() { var ( - handler *eventHandlerImpl - fakeProcessor *statefakes.FakeChangeProcessor - fakeGenerator *configfakes.FakeGenerator - fakeNginxFileMgr *filefakes.FakeManager - fakeNginxRuntimeMgr 
*runtimefakes.FakeManager - fakeStatusUpdater *statusfakes.FakeGroupUpdater - fakeEventRecorder *record.FakeRecorder - fakeK8sClient client.WithWatch - namespace = "nginx-gateway" - configName = "nginx-gateway-config" - zapLogLevelSetter zapLogLevelSetter + baseGraph *graph.Graph + handler *eventHandlerImpl + fakeProcessor *statefakes.FakeChangeProcessor + fakeGenerator *configfakes.FakeGenerator + fakeNginxUpdater *agentfakes.FakeNginxUpdater + fakeProvisioner *provisionerfakes.FakeProvisioner + fakeStatusUpdater *statusfakes.FakeGroupUpdater + fakeEventRecorder *record.FakeRecorder + fakeK8sClient client.WithWatch + queue *status.Queue + namespace = "nginx-gateway" + configName = "nginx-gateway-config" + zapLogLevelSetter zapLogLevelSetter + ctx context.Context + cancel context.CancelFunc ) - const nginxGatewayServiceName = "nginx-gateway" - - createService := func(name string) *v1.Service { - return &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "nginx-gateway", - }, - } - } - - expectReconfig := func(expectedConf dataplane.Configuration, expectedFiles []file.File) { + expectReconfig := func(expectedConf dataplane.Configuration, expectedFiles []agent.File) { Expect(fakeProcessor.ProcessCallCount()).Should(Equal(1)) Expect(fakeGenerator.GenerateCallCount()).Should(Equal(1)) Expect(fakeGenerator.GenerateArgsForCall(0)).Should(Equal(expectedConf)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).Should(Equal(1)) - files := fakeNginxFileMgr.ReplaceFilesArgsForCall(0) - Expect(files).Should(Equal(expectedFiles)) - - Expect(fakeNginxRuntimeMgr.ReloadCallCount()).Should(Equal(1)) + Expect(fakeNginxUpdater.UpdateConfigCallCount()).Should(Equal(1)) + _, files := fakeNginxUpdater.UpdateConfigArgsForCall(0) + Expect(expectedFiles).To(Equal(files)) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).Should(Equal(2)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(2)) _, name, reqs := 
fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupAllExceptGateways)) Expect(reqs).To(BeEmpty()) _, name, reqs = fakeStatusUpdater.UpdateGroupArgsForCall(1) Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) + Expect(reqs).To(HaveLen(1)) + + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(1)) } BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext // ignore for test + + baseGraph = &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway"}: { + Valid: true, + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), + }, + }, + }, + } + fakeProcessor = &statefakes.FakeChangeProcessor{} - fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) + fakeProcessor.ProcessReturns(&graph.Graph{}) + fakeProcessor.GetLatestGraphReturns(baseGraph) fakeGenerator = &configfakes.FakeGenerator{} - fakeNginxFileMgr = &filefakes.FakeManager{} - fakeNginxRuntimeMgr = &runtimefakes.FakeManager{} + fakeNginxUpdater = &agentfakes.FakeNginxUpdater{} + fakeProvisioner = &provisionerfakes.FakeProvisioner{} + fakeProvisioner.RegisterGatewayReturns(nil) fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} fakeEventRecorder = record.NewFakeRecorder(1) zapLogLevelSetter = newZapLogLevelSetter(zap.NewAtomicLevel()) - fakeK8sClient = fake.NewFakeClient() + queue = status.NewQueue() - // Needed because handler checks the service from the API on every HandleEventBatch - Expect(fakeK8sClient.Create(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) + gatewaySvc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway-nginx", + }, + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.4", + }, + } + fakeK8sClient = 
fake.NewFakeClient(gatewaySvc) handler = newEventHandlerImpl(eventHandlerConfig{ - k8sClient: fakeK8sClient, - processor: fakeProcessor, - generator: fakeGenerator, - logLevelSetter: zapLogLevelSetter, - nginxFileMgr: fakeNginxFileMgr, - nginxRuntimeMgr: fakeNginxRuntimeMgr, - statusUpdater: fakeStatusUpdater, - eventRecorder: fakeEventRecorder, - deployCtxCollector: &licensingfakes.FakeCollector{}, - nginxConfiguredOnStartChecker: newNginxConfiguredOnStartChecker(), - controlConfigNSName: types.NamespacedName{Namespace: namespace, Name: configName}, + ctx: ctx, + k8sClient: fakeK8sClient, + processor: fakeProcessor, + generator: fakeGenerator, + logLevelSetter: zapLogLevelSetter, + nginxUpdater: fakeNginxUpdater, + nginxProvisioner: fakeProvisioner, + statusUpdater: fakeStatusUpdater, + eventRecorder: fakeEventRecorder, + deployCtxCollector: &licensingfakes.FakeCollector{}, + graphBuiltHealthChecker: newGraphBuiltHealthChecker(), + statusQueue: queue, + nginxDeployments: agent.NewDeploymentStore(&agentgrpcfakes.FakeConnectionsTracker{}), + controlConfigNSName: types.NamespacedName{Namespace: namespace, Name: configName}, gatewayPodConfig: config.GatewayPodConfig{ ServiceName: "nginx-gateway", Namespace: "nginx-gateway", }, - metricsCollector: collectors.NewControllerNoopCollector(), - updateGatewayClassStatus: true, + gatewayClassName: "nginx", + metricsCollector: collectors.NewControllerNoopCollector(), }) - Expect(handler.cfg.nginxConfiguredOnStartChecker.ready).To(BeFalse()) + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) + }) + + AfterEach(func() { + cancel() }) Describe("Process the Gateway API resources events", func() { - fakeCfgFiles := []file.File{ + fakeCfgFiles := []agent.File{ { - Type: file.TypeRegular, - Path: "test.conf", + Meta: &pb.FileMeta{ + Name: "test.conf", + }, }, } @@ -140,13 +177,12 @@ var _ = Describe("eventHandler", func() { } BeforeEach(func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange /* changed */, 
&graph.Graph{}) - + fakeProcessor.ProcessReturns(baseGraph) fakeGenerator.GenerateReturns(fakeCfgFiles) }) AfterEach(func() { - Expect(handler.cfg.nginxConfiguredOnStartChecker.ready).To(BeTrue()) + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeTrue()) }) When("a batch has one event", func() { @@ -156,13 +192,14 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) checkUpsertEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) }) - It("should process Delete", func() { e := &events.DeleteEvent{ Type: &gatewayv1.HTTPRoute{}, @@ -172,17 +209,75 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) checkDeleteEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) + }) + + It("should not build anything if Gateway isn't set", func() { + fakeProcessor.ProcessReturns(&graph.Graph{}) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) + Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + // status update for GatewayClass should 
still occur + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + }) + It("should not build anything if graph is nil", func() { + fakeProcessor.ProcessReturns(nil) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) + Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + // status update for GatewayClass should not occur + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(0)) + }) + It("should update gateway class even if gateway is invalid", func() { + fakeProcessor.ProcessReturns(&graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway"}: { + Valid: false, + }, + }, + }) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + // status update should still occur for GatewayClasses + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) }) }) When("a batch has multiple events", func() { It("should process events", func() { - upsertEvent := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + upsertEvent := &events.UpsertEvent{Resource: &gatewayv1.Gateway{}} deleteEvent := &events.DeleteEvent{ Type: &gatewayv1.HTTPRoute{}, NamespacedName: types.NamespacedName{Namespace: "test", Name: "route"}, @@ -196,68 +291,15 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := 
dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) }) }) }) - DescribeTable( - "updating statuses of GatewayClass conditionally based on handler configuration", - func(updateGatewayClassStatus bool) { - handler.cfg.updateGatewayClassStatus = updateGatewayClassStatus - - gc := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - } - ignoredGC := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored", - }, - } - - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ - GatewayClass: &graph.GatewayClass{ - Source: gc, - Valid: true, - }, - IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ - client.ObjectKeyFromObject(ignoredGC): ignoredGC, - }, - }) - - e := &events.UpsertEvent{ - Resource: &gatewayv1.HTTPRoute{}, // any supported is OK - } - - batch := []interface{}{e} - - var expectedReqsCount int - if updateGatewayClassStatus { - expectedReqsCount = 2 - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(2)) - - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupAllExceptGateways)) - Expect(reqs).To(HaveLen(expectedReqsCount)) - for _, req := range reqs { - Expect(req.NsName).To(BeElementOf( - client.ObjectKeyFromObject(gc), - client.ObjectKeyFromObject(ignoredGC), - )) - Expect(req.ResourceType).To(Equal(&gatewayv1.GatewayClass{})) - } - }, - Entry("should update statuses of GatewayClass", true), - Entry("should not update statuses of GatewayClass", false), - ) - When("receiving control plane configuration updates", func() { cfg := func(level ngfAPI.ControllerLogLevel) *ngfAPI.NginxGateway { return &ngfAPI.NginxGateway{ @@ -277,9 +319,13 @@ var _ = Describe("eventHandler", func() { batch := 
[]interface{}{&events.UpsertEvent{Resource: cfg(ngfAPI.ControllerLogLevelError)}} handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) + + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(BeNumerically(">", 1)) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(HaveLen(1)) @@ -292,9 +338,13 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{&events.UpsertEvent{Resource: cfg(ngfAPI.ControllerLogLevel("invalid"))}} handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) + + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(BeNumerically(">", 1)) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(HaveLen(1)) @@ -320,9 +370,13 @@ var _ = Describe("eventHandler", func() { } handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) + + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(BeNumerically(">", 1)) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(BeEmpty()) @@ -334,78 +388,7 @@ var _ = Describe("eventHandler", func() { }) }) - When("receiving Service updates", func() { - const notNginxGatewayServiceName = "not-nginx-gateway" - - BeforeEach(func() { - 
fakeProcessor.GetLatestGraphReturns(&graph.Graph{}) - - Expect(fakeK8sClient.Create(context.Background(), createService(notNginxGatewayServiceName))).To(Succeed()) - }) - - It("should not call UpdateAddresses if the Service is not for the Gateway Pod", func() { - e := &events.UpsertEvent{Resource: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-nginx-gateway", - }, - }} - batch := []interface{}{e} - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(BeZero()) - - de := &events.DeleteEvent{Type: &v1.Service{}} - batch = []interface{}{de} - Expect(fakeK8sClient.Delete(context.Background(), createService(notNginxGatewayServiceName))).To(Succeed()) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(BeZero()) - }) - - It("should update the addresses when the Gateway Service is upserted", func() { - e := &events.UpsertEvent{Resource: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-gateway", - Namespace: "nginx-gateway", - }, - }} - batch := []interface{}{e} - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) - }) - - It("should update the addresses when the Gateway Service is deleted", func() { - e := &events.DeleteEvent{ - Type: &v1.Service{}, - NamespacedName: types.NamespacedName{ - Name: "nginx-gateway", - Namespace: "nginx-gateway", - }, - } - batch := []interface{}{e} - Expect(fakeK8sClient.Delete(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - 
Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) - }) - }) - - When("receiving an EndpointsOnlyChange update", func() { + Context("NGINX Plus API calls", func() { e := &events.UpsertEvent{Resource: &discoveryV1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "nginx-gateway", @@ -415,24 +398,19 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} BeforeEach(func() { - fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{}) - upstreams := ngxclient.Upstreams{ - "one": ngxclient.Upstream{ - Peers: []ngxclient.Peer{ - {Server: "server1"}, - }, - }, - } - - streamUpstreams := ngxclient.StreamUpstreams{ - "two": ngxclient.StreamUpstream{ - Peers: []ngxclient.StreamPeer{ - {Server: "server2"}, + fakeProcessor.ProcessReturns(&graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, }, }, - } - - fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, streamUpstreams, nil) + }) }) When("running NGINX Plus", func() { @@ -441,13 +419,15 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) dcfg.NginxPlus = dataplane.NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) - Expect(fakeGenerator.GenerateCallCount()).To(Equal(0)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(0)) - Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(1)) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + 
Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) + + Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateUpstreamServersCallCount()).To(Equal(1)) }) }) @@ -455,150 +435,87 @@ var _ = Describe("eventHandler", func() { It("should not call the NGINX Plus API", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(1)) - Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(0)) - Expect(fakeNginxRuntimeMgr.ReloadCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateConfigCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateUpstreamServersCallCount()).To(Equal(0)) }) }) }) - When("updating upstream servers", func() { - conf := dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "one", - }, - }, - StreamUpstreams: []dataplane.Upstream{ - { - Name: "two", - }, + It("should update status when receiving a queue event", func() { + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, + Deployment: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), }, + Error: errors.New("status error"), } + queue.Enqueue(obj) - BeforeEach(func() { - upstreams := ngxclient.Upstreams{ - "one": ngxclient.Upstream{ - Peers: []ngxclient.Peer{ - {Server: "server1"}, - }, - }, - } - - streamUpstreams := ngxclient.StreamUpstreams{ - "two": ngxclient.StreamUpstream{ - Peers: []ngxclient.StreamPeer{ - {Server: "server2"}, - }, - }, - } - - 
fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, streamUpstreams, nil) - }) - - When("running NGINX Plus", func() { - BeforeEach(func() { - handler.cfg.plus = true - }) - - It("should update servers using the NGINX Plus API", func() { - Expect(handler.updateUpstreamServers(conf)).To(Succeed()) - Expect(fakeNginxRuntimeMgr.UpdateHTTPServersCallCount()).To(Equal(1)) - }) - - It("should return error when GET API returns an error", func() { - fakeNginxRuntimeMgr.GetUpstreamsReturns(nil, nil, errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - - It("should return error when UpdateHTTPServers API returns an error", func() { - fakeNginxRuntimeMgr.UpdateHTTPServersReturns(errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - - It("should return error when UpdateStreamServers API returns an error", func() { - fakeNginxRuntimeMgr.UpdateStreamServersReturns(errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - }) - - When("not running NGINX Plus", func() { - It("should not do anything", func() { - Expect(handler.updateUpstreamServers(conf)).To(Succeed()) - - Expect(fakeNginxRuntimeMgr.UpdateHTTPServersCallCount()).To(Equal(0)) - }) - }) - }) - - It("should set the health checker status properly when there are changes", func() { - e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} - batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() - - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(2)) - 
Expect(readyChannel).To(BeClosed()) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) + gr := handler.cfg.processor.GetLatestGraph() + gw := gr.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway"}] + Expect(gw.LatestReloadResult.Error.Error()).To(Equal("status error")) }) - It("should set the health checker status properly when there are no changes or errors", func() { - e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} - batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - - Expect(readyChannel).To(BeClosed()) + It("should update Gateway status when receiving a queue event", func() { + obj := &status.QueueObject{ + UpdateType: status.UpdateGateway, + Deployment: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), + }, + GatewayService: &v1.Service{}, + } + queue.Enqueue(obj) - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) }) - It("should set the health checker status properly when there is an error", func() { + It("should update nginx conf only when leader", func() { e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() - - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - fakeNginxRuntimeMgr.ReloadReturns(errors.New("reload error")) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - - // now send an update with no changes; 
should still return an error - fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - - // error goes away - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - fakeNginxRuntimeMgr.ReloadReturns(nil) + readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() + + fakeProcessor.ProcessReturns(&graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + }, + }, + }) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) Expect(readyChannel).To(BeClosed()) - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).To(Succeed()) }) It("should panic for an unknown event type", func() { @@ -611,107 +528,36 @@ var _ = Describe("eventHandler", func() { Expect(handle).Should(Panic()) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) }) }) -var _ = Describe("serversEqual", func() { - DescribeTable("determines if HTTP server lists are equal", - func(newServers []ngxclient.UpstreamServer, oldServers []ngxclient.Peer, equal bool) { - Expect(serversEqual(newServers, oldServers)).To(Equal(equal)) - }, - Entry("different length", - []ngxclient.UpstreamServer{ - 
{Server: "server1"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server2"}, - }, - false, - ), - Entry("differing elements", - []ngxclient.UpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server3"}, - }, - false, - ), - Entry("same elements", - []ngxclient.UpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server2"}, - }, - true, - ), - ) - DescribeTable("determines if stream server lists are equal", - func(newServers []ngxclient.StreamUpstreamServer, oldServers []ngxclient.StreamPeer, equal bool) { - Expect(serversEqual(newServers, oldServers)).To(Equal(equal)) - }, - Entry("different length", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server2"}, - }, - false, - ), - Entry("differing elements", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server3"}, - }, - false, - ), - Entry("same elements", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server2"}, - }, - true, - ), - ) -}) - var _ = Describe("getGatewayAddresses", func() { It("gets gateway addresses from a Service", func() { fakeClient := fake.NewFakeClient() - podConfig := config.GatewayPodConfig{ - PodIP: "1.2.3.4", - ServiceName: "my-service", - Namespace: "nginx-gateway", + + // no Service exists yet, should get error and no Address + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, + }, } - // no Service exists yet, should get error and Pod Address - addrs, err := getGatewayAddresses(context.Background(), fakeClient, nil, podConfig) + ctx, cancel := 
context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + addrs, err := getGatewayAddresses(ctx, fakeClient, nil, gateway, "nginx") Expect(err).To(HaveOccurred()) - Expect(addrs).To(HaveLen(1)) - Expect(addrs[0].Value).To(Equal("1.2.3.4")) + Expect(addrs).To(BeNil()) // Create LoadBalancer Service svc := v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - Namespace: "nginx-gateway", + Name: "gateway-nginx", + Namespace: "test-ns", }, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeLoadBalancer, @@ -732,11 +578,31 @@ var _ = Describe("getGatewayAddresses", func() { Expect(fakeClient.Create(context.Background(), &svc)).To(Succeed()) - addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, podConfig) + addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx") Expect(err).ToNot(HaveOccurred()) Expect(addrs).To(HaveLen(2)) Expect(addrs[0].Value).To(Equal("34.35.36.37")) Expect(addrs[1].Value).To(Equal("myhost")) + + Expect(fakeClient.Delete(context.Background(), &svc)).To(Succeed()) + // Create ClusterIP Service + svc = v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway-nginx", + Namespace: "test-ns", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ClusterIP: "12.13.14.15", + }, + } + + Expect(fakeClient.Create(context.Background(), &svc)).To(Succeed()) + + addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx") + Expect(err).ToNot(HaveOccurred()) + Expect(addrs).To(HaveLen(1)) + Expect(addrs[0].Value).To(Equal("12.13.14.15")) }) }) @@ -752,6 +618,17 @@ var _ = Describe("getDeploymentContext", func() { }) When("nginx plus is true", func() { + var ctx context.Context + var cancel context.CancelFunc + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext + }) + + AfterEach(func() { + cancel() + }) + It("returns deployment context", func() { expDepCtx := dataplane.DeploymentContext{ 
Integration: "ngf", @@ -761,7 +638,9 @@ var _ = Describe("getDeploymentContext", func() { } handler := newEventHandlerImpl(eventHandlerConfig{ - plus: true, + ctx: ctx, + statusQueue: status.NewQueue(), + plus: true, deployCtxCollector: &licensingfakes.FakeCollector{ CollectStub: func(_ context.Context) (dataplane.DeploymentContext, error) { return expDepCtx, nil @@ -777,7 +656,9 @@ var _ = Describe("getDeploymentContext", func() { expErr := errors.New("collect error") handler := newEventHandlerImpl(eventHandlerConfig{ - plus: true, + ctx: ctx, + statusQueue: status.NewQueue(), + plus: true, deployCtxCollector: &licensingfakes.FakeCollector{ CollectStub: func(_ context.Context) (dataplane.DeploymentContext, error) { return dataplane.DeploymentContext{}, expErr diff --git a/internal/mode/static/health.go b/internal/mode/static/health.go index 180c67d643..a0fe4e9b59 100644 --- a/internal/mode/static/health.go +++ b/internal/mode/static/health.go @@ -6,49 +6,44 @@ import ( "sync" ) -// newNginxConfiguredOnStartChecker creates a new nginxConfiguredOnStartChecker. -func newNginxConfiguredOnStartChecker() *nginxConfiguredOnStartChecker { - return &nginxConfiguredOnStartChecker{ +// newGraphBuiltHealthChecker creates a new graphBuiltHealthChecker. +func newGraphBuiltHealthChecker() *graphBuiltHealthChecker { + return &graphBuiltHealthChecker{ readyCh: make(chan struct{}), } } -// nginxConfiguredOnStartChecker is used to check if nginx is successfully configured and if the NGF Pod is ready. -type nginxConfiguredOnStartChecker struct { - // firstBatchError is set when the first batch fails to configure nginx - // and we don't want to set ourselves as ready on the next batch if nothing changes - firstBatchError error - // readyCh is a channel that is initialized in newNginxConfiguredOnStartChecker and represents if the NGF Pod is ready. +// graphBuiltHealthChecker is used to check if the initial graph is built and the NGF Pod is ready. 
+type graphBuiltHealthChecker struct { + // readyCh is a channel that is initialized in newGraphBuiltHealthChecker and represents if the NGF Pod is ready. readyCh chan struct{} lock sync.RWMutex ready bool } // readyCheck returns the ready-state of the Pod. It satisfies the controller-runtime Checker type. -// We are considered ready after the handler processed the first batch. In case there is NGINX configuration -// to write, it must be written and NGINX must be reloaded successfully. -func (h *nginxConfiguredOnStartChecker) readyCheck(_ *http.Request) error { +// We are considered ready after the first graph is built. +func (h *graphBuiltHealthChecker) readyCheck(_ *http.Request) error { h.lock.RLock() defer h.lock.RUnlock() if !h.ready { - return errors.New("nginx has not yet become ready to accept traffic") + return errors.New("control plane is not yet ready") } return nil } // setAsReady marks the health check as ready. -func (h *nginxConfiguredOnStartChecker) setAsReady() { +func (h *graphBuiltHealthChecker) setAsReady() { h.lock.Lock() defer h.lock.Unlock() h.ready = true - h.firstBatchError = nil close(h.readyCh) } // getReadyCh returns a read-only channel, which determines if the NGF Pod is ready. 
-func (h *nginxConfiguredOnStartChecker) getReadyCh() <-chan struct{} { +func (h *graphBuiltHealthChecker) getReadyCh() <-chan struct{} { return h.readyCh } diff --git a/internal/mode/static/health_test.go b/internal/mode/static/health_test.go index 5bfd7aab73..7246283ed9 100644 --- a/internal/mode/static/health_test.go +++ b/internal/mode/static/health_test.go @@ -9,9 +9,9 @@ import ( func TestReadyCheck(t *testing.T) { t.Parallel() g := NewWithT(t) - nginxChecker := newNginxConfiguredOnStartChecker() - g.Expect(nginxChecker.readyCheck(nil)).ToNot(Succeed()) + healthChecker := newGraphBuiltHealthChecker() + g.Expect(healthChecker.readyCheck(nil)).ToNot(Succeed()) - nginxChecker.ready = true - g.Expect(nginxChecker.readyCheck(nil)).To(Succeed()) + healthChecker.ready = true + g.Expect(healthChecker.readyCheck(nil)).To(Succeed()) } diff --git a/internal/mode/static/log_level_setters.go b/internal/mode/static/log_level_setters.go index 072c760e44..765cb80e0f 100644 --- a/internal/mode/static/log_level_setters.go +++ b/internal/mode/static/log_level_setters.go @@ -3,8 +3,6 @@ package static import ( "errors" - "github.com/go-kit/log" - "github.com/prometheus/common/promlog" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -63,41 +61,3 @@ func (z zapLogLevelSetter) SetLevel(level string) error { func (z zapLogLevelSetter) Enabled(level zapcore.Level) bool { return z.atomicLevel.Enabled(level) } - -// leveledPrometheusLogger is a leveled prometheus logger. -// This interface is required because the promlog.NewDynamic returns an unexported type *logger. 
-type leveledPrometheusLogger interface { - log.Logger - SetLevel(level *promlog.AllowedLevel) -} - -type promLogLevelSetter struct { - logger leveledPrometheusLogger -} - -func newPromLogLevelSetter(logger leveledPrometheusLogger) promLogLevelSetter { - return promLogLevelSetter{logger: logger} -} - -func newLeveledPrometheusLogger() (leveledPrometheusLogger, error) { - logFormat := &promlog.AllowedFormat{} - - if err := logFormat.Set("json"); err != nil { - return nil, err - } - - logConfig := &promlog.Config{Format: logFormat} - logger := promlog.NewDynamic(logConfig) - - return logger, nil -} - -func (p promLogLevelSetter) SetLevel(level string) error { - al := &promlog.AllowedLevel{} - if err := al.Set(level); err != nil { - return err - } - - p.logger.SetLevel(al) - return nil -} diff --git a/internal/mode/static/log_level_setters_test.go b/internal/mode/static/log_level_setters_test.go index b9dce5ae71..844b5a8f91 100644 --- a/internal/mode/static/log_level_setters_test.go +++ b/internal/mode/static/log_level_setters_test.go @@ -58,19 +58,3 @@ func TestZapLogLevelSetter_SetLevel(t *testing.T) { g.Expect(zapSetter.SetLevel("invalid")).ToNot(Succeed()) } - -func TestPromLogLevelSetter_SetLevel(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - logger, err := newLeveledPrometheusLogger() - g.Expect(err).ToNot(HaveOccurred()) - - promSetter := newPromLogLevelSetter(logger) - - g.Expect(promSetter.SetLevel("error")).To(Succeed()) - g.Expect(promSetter.SetLevel("info")).To(Succeed()) - g.Expect(promSetter.SetLevel("debug")).To(Succeed()) - - g.Expect(promSetter.SetLevel("invalid")).ToNot(Succeed()) -} diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 6ab99b4516..1af84ebe23 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -3,16 +3,18 @@ package static import ( "context" "fmt" - "os" "time" "github.com/go-logr/logr" tel "github.com/nginx/telemetry-exporter/pkg/telemetry" 
"github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "google.golang.org/grpc" appsv1 "k8s.io/api/apps/v1" + authv1 "k8s.io/api/authentication/v1" apiv1 "k8s.io/api/core/v1" discoveryV1 "k8s.io/api/discovery/v1" + rbacv1 "k8s.io/api/rbac/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,23 +47,25 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/framework/runnables" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" + frameworkStatus "github.com/nginx/nginx-gateway-fabric/internal/framework/status" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" ngxcfg "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/observability" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/upstreamsettings" ngxvalidation "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/validation" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - ngxruntime 
"github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/telemetry" ) @@ -73,6 +77,7 @@ const ( plusCAField = "ca.crt" plusClientCertField = "tls.crt" plusClientKeyField = "tls.key" + grpcServerPort = 8443 ) var scheme = runtime.NewScheme() @@ -88,12 +93,13 @@ func init() { utilruntime.Must(ngfAPIv1alpha2.AddToScheme(scheme)) utilruntime.Must(apiext.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(authv1.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) } -//nolint:gocyclo func StartManager(cfg config.Config) error { - nginxChecker := newNginxConfiguredOnStartChecker() - mgr, err := createManager(cfg, nginxChecker) + healthChecker := newGraphBuiltHealthChecker() + mgr, err := createManager(cfg, healthChecker) if err != nil { return fmt.Errorf("cannot build runtime manager: %w", err) } @@ -101,12 +107,7 @@ func StartManager(cfg config.Config) error { recorderName := fmt.Sprintf("nginx-gateway-fabric-%s", cfg.GatewayClassName) recorder := mgr.GetEventRecorderFor(recorderName) - promLogger, err := newLeveledPrometheusLogger() - if err != nil { - return fmt.Errorf("error creating leveled prometheus logger: %w", err) - } - - logLevelSetter := newMultiLogLevelSetter(newZapLogLevelSetter(cfg.AtomicLevel), newPromLogLevelSetter(promLogger)) + logLevelSetter := newMultiLogLevelSetter(newZapLogLevelSetter(cfg.AtomicLevel)) ctx := ctlr.SetupSignalHandler() @@ -119,12 +120,6 @@ func StartManager(cfg config.Config) 
error { return err } - // protectedPorts is the map of ports that may not be configured by a listener, and the name of what it is used for - protectedPorts := map[int32]string{ - int32(cfg.MetricsConfig.Port): "MetricsPort", //nolint:gosec // port will not overflow int32 - int32(cfg.HealthConfig.Port): "HealthPort", //nolint:gosec // port will not overflow int32 - } - mustExtractGVK := kinds.NewMustExtractGKV(scheme) genericValidator := ngxvalidation.GenericValidator{} @@ -146,117 +141,119 @@ func StartManager(cfg config.Config) error { }, EventRecorder: recorder, MustExtractGVK: mustExtractGVK, - ProtectedPorts: protectedPorts, PlusSecrets: plusSecrets, }) - // Clear the configuration folders to ensure that no files are left over in case the control plane was restarted - // (this assumes the folders are in a shared volume). - removedPaths, err := file.ClearFolders(file.NewStdLibOSFileManager(), ngxcfg.ConfigFolders) - for _, path := range removedPaths { - cfg.Logger.Info("removed configuration file", "path", path) - } - if err != nil { - return fmt.Errorf("cannot clear NGINX configuration folders: %w", err) - } - - processHandler := ngxruntime.NewProcessHandlerImpl(os.ReadFile, os.Stat) - - // Ensure NGINX is running before registering metrics & starting the manager. 
- p, err := processHandler.FindMainProcess(ctx, ngxruntime.PidFileTimeout) - if err != nil { - return fmt.Errorf("NGINX is not running: %w", err) - } - cfg.Logger.V(1).Info("NGINX is running with PID", "pid", p) - - var ( - ngxruntimeCollector ngxruntime.MetricsCollector = collectors.NewManagerNoopCollector() - handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() - ) - - var ngxPlusClient ngxruntime.NginxPlusClient - if cfg.Plus { - ngxPlusClient, err = ngxruntime.CreatePlusClient() - if err != nil { - return fmt.Errorf("error creating NGINX plus client: %w", err) - } - } + var handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() if cfg.MetricsConfig.Enabled { constLabels := map[string]string{"class": cfg.GatewayClassName} - var ngxCollector prometheus.Collector - if cfg.Plus { - ngxCollector, err = collectors.NewNginxPlusMetricsCollector(ngxPlusClient, constLabels, promLogger) - } else { - ngxCollector = collectors.NewNginxMetricsCollector(constLabels, promLogger) - } - if err != nil { - return fmt.Errorf("cannot create nginx metrics collector: %w", err) - } - ngxruntimeCollector = collectors.NewManagerMetricsCollector(constLabels) handlerCollector = collectors.NewControllerCollector(constLabels) - - ngxruntimeCollector, ok := ngxruntimeCollector.(prometheus.Collector) - if !ok { - return fmt.Errorf("ngxruntimeCollector is not a prometheus.Collector: %w", status.ErrFailedAssert) - } handlerCollector, ok := handlerCollector.(prometheus.Collector) if !ok { - return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", status.ErrFailedAssert) + return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", frameworkStatus.ErrFailedAssert) } - metrics.Registry.MustRegister( - ngxCollector, - ngxruntimeCollector, - handlerCollector, - ) + metrics.Registry.MustRegister(handlerCollector) } - statusUpdater := status.NewUpdater( + statusUpdater := frameworkStatus.NewUpdater( mgr.GetClient(), 
cfg.Logger.WithName("statusUpdater"), ) - groupStatusUpdater := status.NewLeaderAwareGroupUpdater(statusUpdater) + groupStatusUpdater := frameworkStatus.NewLeaderAwareGroupUpdater(statusUpdater) deployCtxCollector := licensing.NewDeploymentContextCollector(licensing.DeploymentContextCollectorConfig{ K8sClientReader: mgr.GetAPIReader(), PodUID: cfg.GatewayPodConfig.UID, Logger: cfg.Logger.WithName("deployCtxCollector"), }) + statusQueue := status.NewQueue() + resetConnChan := make(chan struct{}) + nginxUpdater := agent.NewNginxUpdater( + cfg.Logger.WithName("nginxUpdater"), + mgr.GetAPIReader(), + statusQueue, + resetConnChan, + cfg.Plus, + ) + + tokenAudience := fmt.Sprintf( + "%s.%s.svc", + cfg.GatewayPodConfig.ServiceName, + cfg.GatewayPodConfig.Namespace, + ) + + grpcServer := agentgrpc.NewServer( + cfg.Logger.WithName("agentGRPCServer"), + grpcServerPort, + []func(*grpc.Server){ + nginxUpdater.CommandService.Register, + nginxUpdater.FileService.Register, + }, + mgr.GetClient(), + tokenAudience, + resetConnChan, + ) + + if err = mgr.Add(&runnables.LeaderOrNonLeader{Runnable: grpcServer}); err != nil { + return fmt.Errorf("cannot register grpc server: %w", err) + } + + nginxProvisioner, provLoop, err := provisioner.NewNginxProvisioner( + ctx, + mgr, + provisioner.Config{ + DeploymentStore: nginxUpdater.NginxDeployments, + StatusQueue: statusQueue, + Logger: cfg.Logger.WithName("provisioner"), + EventRecorder: recorder, + GatewayPodConfig: &cfg.GatewayPodConfig, + GCName: cfg.GatewayClassName, + AgentTLSSecretName: cfg.AgentTLSSecretName, + NGINXSCCName: cfg.NGINXSCCName, + Plus: cfg.Plus, + NginxDockerSecretNames: cfg.NginxDockerSecretNames, + PlusUsageConfig: &cfg.UsageReportConfig, + }, + ) + if err != nil { + return fmt.Errorf("error building provisioner: %w", err) + } + + if err := mgr.Add(&runnables.LeaderOrNonLeader{Runnable: provLoop}); err != nil { + return fmt.Errorf("cannot register provisioner event loop: %w", err) + } + eventHandler := 
newEventHandlerImpl(eventHandlerConfig{ - nginxFileMgr: file.NewManagerImpl( - cfg.Logger.WithName("nginxFileManager"), - file.NewStdLibOSFileManager(), - ), + ctx: ctx, + nginxUpdater: nginxUpdater, + nginxProvisioner: nginxProvisioner, metricsCollector: handlerCollector, - nginxRuntimeMgr: ngxruntime.NewManagerImpl( - ngxPlusClient, - ngxruntimeCollector, - cfg.Logger.WithName("nginxRuntimeManager"), - processHandler, - ngxruntime.NewVerifyClient(ngxruntime.NginxReloadTimeout), - ), - statusUpdater: groupStatusUpdater, - processor: processor, - serviceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), + statusUpdater: groupStatusUpdater, + processor: processor, + serviceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), generator: ngxcfg.NewGeneratorImpl( cfg.Plus, &cfg.UsageReportConfig, cfg.Logger.WithName("generator"), ), - k8sClient: mgr.GetClient(), - k8sReader: mgr.GetAPIReader(), - logLevelSetter: logLevelSetter, - eventRecorder: recorder, - deployCtxCollector: deployCtxCollector, - nginxConfiguredOnStartChecker: nginxChecker, - gatewayPodConfig: cfg.GatewayPodConfig, - controlConfigNSName: controlConfigNSName, - gatewayCtlrName: cfg.GatewayCtlrName, - updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, - plus: cfg.Plus, + k8sClient: mgr.GetClient(), + k8sReader: mgr.GetAPIReader(), + logger: cfg.Logger.WithName("eventHandler"), + logLevelSetter: logLevelSetter, + eventRecorder: recorder, + deployCtxCollector: deployCtxCollector, + graphBuiltHealthChecker: healthChecker, + gatewayPodConfig: cfg.GatewayPodConfig, + controlConfigNSName: controlConfigNSName, + gatewayCtlrName: cfg.GatewayCtlrName, + gatewayClassName: cfg.GatewayClassName, + plus: cfg.Plus, + statusQueue: statusQueue, + nginxDeployments: nginxUpdater.NginxDeployments, }) objects, objectLists := prepareFirstEventBatchPreparerArgs(cfg) @@ -273,8 +270,12 @@ func StartManager(cfg config.Config) error { return fmt.Errorf("cannot register event loop: %w", err) } - if err = 
mgr.Add(runnables.NewEnableAfterBecameLeader(groupStatusUpdater.Enable)); err != nil { - return fmt.Errorf("cannot register status updater: %w", err) + if err = mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ + groupStatusUpdater.Enable, + nginxProvisioner.Enable, + eventHandler.enable, + })); err != nil { + return fmt.Errorf("cannot register functions that get called after Pod becomes leader: %w", err) } if cfg.ProductTelemetryConfig.Enabled { @@ -282,7 +283,7 @@ func StartManager(cfg config.Config) error { K8sClientReader: mgr.GetAPIReader(), GraphGetter: processor, ConfigurationGetter: eventHandler, - Version: cfg.Version, + Version: cfg.GatewayPodConfig.Version, PodNSName: types.NamespacedName{ Namespace: cfg.GatewayPodConfig.Namespace, Name: cfg.GatewayPodConfig.Name, @@ -291,7 +292,7 @@ func StartManager(cfg config.Config) error { Flags: cfg.Flags, }) - job, err := createTelemetryJob(cfg, dataCollector, nginxChecker.getReadyCh()) + job, err := createTelemetryJob(cfg, dataCollector, healthChecker.getReadyCh()) if err != nil { return fmt.Errorf("cannot create telemetry job: %w", err) } @@ -332,7 +333,7 @@ func createPolicyManager( return policies.NewManager(mustExtractGVK, cfgs...) } -func createManager(cfg config.Config, nginxChecker *nginxConfiguredOnStartChecker) (manager.Manager, error) { +func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (manager.Manager, error) { options := manager.Options{ Scheme: scheme, Logger: cfg.Logger.V(1), @@ -367,11 +368,24 @@ func createManager(cfg config.Config, nginxChecker *nginxConfiguredOnStartChecke } if cfg.HealthConfig.Enabled { - if err := mgr.AddReadyzCheck("readyz", nginxChecker.readyCheck); err != nil { + if err := mgr.AddReadyzCheck("readyz", healthChecker.readyCheck); err != nil { return nil, fmt.Errorf("error adding ready check: %w", err) } } + // Add an indexer to get pods by their IP address. 
This is used when validating that an agent + // connection is coming from the right place. + var podIPIndexFunc client.IndexerFunc = index.PodIPIndexFunc + if err := controller.AddIndex( + context.Background(), + mgr.GetFieldIndexer(), + &apiv1.Pod{}, + "status.podIP", + podIPIndexFunc, + ); err != nil { + return nil, fmt.Errorf("error adding pod IP indexer: %w", err) + } + return mgr, nil } @@ -415,12 +429,6 @@ func registerControllers( options := []controller.Option{ controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), } - if cfg.GatewayNsName != nil { - options = append( - options, - controller.WithNamespacedNameFilter(filter.CreateSingleResourceFilter(*cfg.GatewayNsName)), - ) - } return options }(), }, @@ -437,19 +445,6 @@ func registerControllers( controller.WithK8sPredicate(predicate.ServicePortsChangedPredicate{}), }, }, - { - objectType: &apiv1.Service{}, - name: "ngf-service", // unique controller names are needed and we have multiple Service ctlrs - options: func() []controller.Option { - svcNSName := types.NamespacedName{ - Namespace: cfg.GatewayPodConfig.Namespace, - Name: cfg.GatewayPodConfig.ServiceName, - } - return []controller.Option{ - controller.WithK8sPredicate(predicate.GatewayServicePredicate{NSName: svcNSName}), - } - }(), - }, { objectType: &apiv1.Secret{}, options: []controller.Option{ @@ -485,7 +480,7 @@ func registerControllers( }, }, { - objectType: &ngfAPIv1alpha1.NginxProxy{}, + objectType: &ngfAPIv1alpha2.NginxProxy{}, options: []controller.Option{ controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), }, @@ -545,6 +540,7 @@ func registerControllers( objectType: &ngfAPIv1alpha1.NginxGateway{}, options: []controller.Option{ controller.WithNamespacedNameFilter(filter.CreateSingleResourceFilter(controlConfigNSName)), + controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), }, }) if err := setInitialConfig( @@ -746,7 +742,7 @@ func prepareFirstEventBatchPreparerArgs(cfg config.Config) 
([]client.Object, []c &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, &gatewayv1.GRPCRouteList{}, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, &ngfAPIv1alpha2.ObservabilityPolicyList{}, @@ -770,16 +766,7 @@ func prepareFirstEventBatchPreparerArgs(cfg config.Config) ([]client.Object, []c ) } - gwNsName := cfg.GatewayNsName - - if gwNsName == nil { - objectLists = append(objectLists, &gatewayv1.GatewayList{}) - } else { - objects = append( - objects, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: gwNsName.Name, Namespace: gwNsName.Namespace}}, - ) - } + objectLists = append(objectLists, &gatewayv1.GatewayList{}) return objects, objectLists } diff --git a/internal/mode/static/manager_test.go b/internal/mode/static/manager_test.go index 5361be4deb..9a9f0768b7 100644 --- a/internal/mode/static/manager_test.go +++ b/internal/mode/static/manager_test.go @@ -45,10 +45,9 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { cfg config.Config }{ { - name: "gwNsName is nil", + name: "base case", cfg: config.Config{ GatewayClassName: gcName, - GatewayNsName: nil, ExperimentalFeatures: false, SnippetsFilters: false, }, @@ -63,7 +62,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &gatewayv1.HTTPRouteList{}, &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, &gatewayv1.GRPCRouteList{}, partialObjectMetadataList, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, @@ -72,49 +71,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName is not nil", + name: "experimental enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, - ExperimentalFeatures: false, - SnippetsFilters: false, - }, - expectedObjects: []client.Object{ - 
&gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, - }, - expectedObjectLists: []client.ObjectList{ - &apiv1.ServiceList{}, - &apiv1.SecretList{}, - &apiv1.NamespaceList{}, - &discoveryV1.EndpointSliceList{}, - &gatewayv1.HTTPRouteList{}, - &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, - &gatewayv1.GRPCRouteList{}, - partialObjectMetadataList, - &ngfAPIv1alpha1.ClientSettingsPolicyList{}, - &ngfAPIv1alpha2.ObservabilityPolicyList{}, - &ngfAPIv1alpha1.UpstreamSettingsPolicyList{}, - }, - }, - { - name: "gwNsName is not nil and experimental enabled", - cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: true, SnippetsFilters: false, }, expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -123,8 +87,9 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.ConfigMapList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1alpha3.BackendTLSPolicyList{}, &gatewayv1alpha2.TLSRouteList{}, @@ -135,19 +100,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName is not nil and snippets filters enabled", + name: "snippets filters enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: false, SnippetsFilters: true, }, 
expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -155,8 +115,9 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.NamespaceList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1.GRPCRouteList{}, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, @@ -166,19 +127,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName is not nil, experimental and snippets filters enabled", + name: "experimental and snippets filters enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: true, SnippetsFilters: true, }, expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -187,8 +143,9 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.ConfigMapList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1alpha3.BackendTLSPolicyList{}, &gatewayv1alpha2.TLSRouteList{}, diff --git a/internal/mode/static/metrics/collectors/nginx.go b/internal/mode/static/metrics/collectors/nginx.go deleted file mode 100644 index 838dcf6429..0000000000 --- a/internal/mode/static/metrics/collectors/nginx.go +++ 
/dev/null @@ -1,48 +0,0 @@ -package collectors - -import ( - "fmt" - - "github.com/go-kit/log" - "github.com/nginxinc/nginx-plus-go-client/client" - prometheusClient "github.com/nginxinc/nginx-prometheus-exporter/client" - nginxCollector "github.com/nginxinc/nginx-prometheus-exporter/collector" - "github.com/prometheus/client_golang/prometheus" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -const ( - nginxStatusSock = "/var/run/nginx/nginx-status.sock" - nginxStatusURI = "http://config-status/stub_status" -) - -// NewNginxMetricsCollector creates an NginxCollector which fetches stats from NGINX over a unix socket. -func NewNginxMetricsCollector(constLabels map[string]string, logger log.Logger) prometheus.Collector { - httpClient := runtime.GetSocketClient(nginxStatusSock) - ngxClient := prometheusClient.NewNginxClient(&httpClient, nginxStatusURI) - - return nginxCollector.NewNginxCollector(ngxClient, metrics.Namespace, constLabels, logger) -} - -// NewNginxPlusMetricsCollector creates an NginxCollector which fetches stats from NGINX Plus API over a unix socket. 
-func NewNginxPlusMetricsCollector( - plusClient runtime.NginxPlusClient, - constLabels map[string]string, - logger log.Logger, -) (prometheus.Collector, error) { - nc, ok := plusClient.(*client.NginxClient) - if !ok { - panic(fmt.Sprintf("expected *client.NginxClient, got %T", plusClient)) - } - collector := nginxCollector.NewNginxPlusCollector( - nc, - metrics.Namespace, - nginxCollector.VariableLabelNames{}, - constLabels, - logger, - ) - - return collector, nil -} diff --git a/internal/mode/static/metrics/collectors/nginx_runtime.go b/internal/mode/static/metrics/collectors/nginx_runtime.go deleted file mode 100644 index c84e696a2f..0000000000 --- a/internal/mode/static/metrics/collectors/nginx_runtime.go +++ /dev/null @@ -1,116 +0,0 @@ -package collectors - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics" -) - -// NginxRuntimeCollector implements runtime.Collector interface and prometheus.Collector interface. -type NginxRuntimeCollector struct { - // Metrics - reloadsTotal prometheus.Counter - reloadsError prometheus.Counter - configStale prometheus.Gauge - reloadsDuration prometheus.Histogram -} - -// NewManagerMetricsCollector creates a new NginxRuntimeCollector. 
-func NewManagerMetricsCollector(constLabels map[string]string) *NginxRuntimeCollector { - nc := &NginxRuntimeCollector{ - reloadsTotal: prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "nginx_reloads_total", - Namespace: metrics.Namespace, - Help: "Number of successful NGINX reloads", - ConstLabels: constLabels, - }), - reloadsError: prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "nginx_reload_errors_total", - Namespace: metrics.Namespace, - Help: "Number of unsuccessful NGINX reloads", - ConstLabels: constLabels, - }, - ), - configStale: prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "nginx_stale_config", - Namespace: metrics.Namespace, - Help: "Indicates if NGINX is not serving the latest configuration.", - ConstLabels: constLabels, - }, - ), - reloadsDuration: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "nginx_reloads_milliseconds", - Namespace: metrics.Namespace, - Help: "Duration in milliseconds of NGINX reloads", - ConstLabels: constLabels, - Buckets: []float64{500, 1000, 5000, 10000, 30000}, - }, - ), - } - return nc -} - -// IncReloadCount increments the counter of successful NGINX reloads and sets the stale config status to false. -func (c *NginxRuntimeCollector) IncReloadCount() { - c.reloadsTotal.Inc() - c.updateConfigStaleStatus(false) -} - -// IncReloadErrors increments the counter of NGINX reload errors and sets the stale config status to true. -func (c *NginxRuntimeCollector) IncReloadErrors() { - c.reloadsError.Inc() - c.updateConfigStaleStatus(true) -} - -// updateConfigStaleStatus updates the last NGINX reload status metric. -func (c *NginxRuntimeCollector) updateConfigStaleStatus(stale bool) { - var status float64 - if stale { - status = 1.0 - } - c.configStale.Set(status) -} - -// ObserveLastReloadTime adds the last NGINX reload time to the histogram. 
-func (c *NginxRuntimeCollector) ObserveLastReloadTime(duration time.Duration) { - c.reloadsDuration.Observe(float64(duration / time.Millisecond)) -} - -// Describe implements prometheus.Collector interface Describe method. -func (c *NginxRuntimeCollector) Describe(ch chan<- *prometheus.Desc) { - c.reloadsTotal.Describe(ch) - c.reloadsError.Describe(ch) - c.configStale.Describe(ch) - c.reloadsDuration.Describe(ch) -} - -// Collect implements the prometheus.Collector interface Collect method. -func (c *NginxRuntimeCollector) Collect(ch chan<- prometheus.Metric) { - c.reloadsTotal.Collect(ch) - c.reloadsError.Collect(ch) - c.configStale.Collect(ch) - c.reloadsDuration.Collect(ch) -} - -// ManagerNoopCollector used to initialize the ManagerCollector when metrics are disabled to avoid nil pointer errors. -type ManagerNoopCollector struct{} - -// NewManagerNoopCollector creates a no-op collector that implements ManagerCollector interface. -func NewManagerNoopCollector() *ManagerNoopCollector { - return &ManagerNoopCollector{} -} - -// IncReloadCount implements a no-op IncReloadCount. -func (c *ManagerNoopCollector) IncReloadCount() {} - -// IncReloadErrors implements a no-op IncReloadErrors. -func (c *ManagerNoopCollector) IncReloadErrors() {} - -// ObserveLastReloadTime implements a no-op ObserveLastReloadTime. 
-func (c *ManagerNoopCollector) ObserveLastReloadTime(_ time.Duration) {} diff --git a/internal/mode/static/nginx/agent/action.go b/internal/mode/static/nginx/agent/action.go new file mode 100644 index 0000000000..575cbf055b --- /dev/null +++ b/internal/mode/static/nginx/agent/action.go @@ -0,0 +1,92 @@ +package agent + +import ( + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/protobuf/types/known/structpb" +) + +func actionsEqual(a, b []*pb.NGINXPlusAction) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + switch actionA := a[i].Action.(type) { + case *pb.NGINXPlusAction_UpdateHttpUpstreamServers: + actionB, ok := b[i].Action.(*pb.NGINXPlusAction_UpdateHttpUpstreamServers) + if !ok || !httpUpstreamsEqual(actionA.UpdateHttpUpstreamServers, actionB.UpdateHttpUpstreamServers) { + return false + } + case *pb.NGINXPlusAction_UpdateStreamServers: + actionB, ok := b[i].Action.(*pb.NGINXPlusAction_UpdateStreamServers) + if !ok || !streamUpstreamsEqual(actionA.UpdateStreamServers, actionB.UpdateStreamServers) { + return false + } + default: + return false + } + } + + return true +} + +func httpUpstreamsEqual(a, b *pb.UpdateHTTPUpstreamServers) bool { + if a.HttpUpstreamName != b.HttpUpstreamName { + return false + } + + if len(a.Servers) != len(b.Servers) { + return false + } + + for i := range a.Servers { + if !structsEqual(a.Servers[i], b.Servers[i]) { + return false + } + } + + return true +} + +func streamUpstreamsEqual(a, b *pb.UpdateStreamServers) bool { + if a.UpstreamStreamName != b.UpstreamStreamName { + return false + } + + if len(a.Servers) != len(b.Servers) { + return false + } + + for i := range a.Servers { + if !structsEqual(a.Servers[i], b.Servers[i]) { + return false + } + } + + return true +} + +func structsEqual(a, b *structpb.Struct) bool { + if len(a.Fields) != len(b.Fields) { + return false + } + + for key, valueA := range a.Fields { + valueB, exists := b.Fields[key] + if !exists || !valuesEqual(valueA, 
valueB) { + return false + } + } + + return true +} + +func valuesEqual(a, b *structpb.Value) bool { + switch valueA := a.Kind.(type) { + case *structpb.Value_StringValue: + valueB, ok := b.Kind.(*structpb.Value_StringValue) + return ok && valueA.StringValue == valueB.StringValue + default: + return false + } +} diff --git a/internal/mode/static/nginx/agent/action_test.go b/internal/mode/static/nginx/agent/action_test.go new file mode 100644 index 0000000000..491dbc0dd8 --- /dev/null +++ b/internal/mode/static/nginx/agent/action_test.go @@ -0,0 +1,347 @@ +package agent + +import ( + "testing" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestActionsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + actionA []*pb.NGINXPlusAction + actionB []*pb.NGINXPlusAction + expected bool + }{ + { + name: "Actions are equal", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "Actions have different types", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: 
&pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "upstream1", + }, + }, + }, + }, + expected: false, + }, + { + name: "Actions have different values", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + }, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(actionsEqual(tt.actionA, tt.actionB)).To(Equal(tt.expected)) + }) + } +} + +func TestHttpUpstreamsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + upstreamA *pb.UpdateHTTPUpstreamServers + upstreamB *pb.UpdateHTTPUpstreamServers + name string + expected bool + }{ + { + name: "HTTP upstreams are equal", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + expected: true, + }, + { + name: "HTTP upstreams have different upstream names", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + }, + 
upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream2", + }, + expected: false, + }, + { + name: "HTTP upstreams have different server lengths", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + {Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + { + name: "HTTP upstreams have different server contents", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(httpUpstreamsEqual(tt.upstreamA, tt.upstreamB)).To(Equal(tt.expected)) + }) + } +} + +func TestStreamUpstreamsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + upstreamA *pb.UpdateStreamServers + upstreamB *pb.UpdateStreamServers + name string + expected bool + }{ + { + name: "Stream upstreams are equal", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: 
"stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + expected: true, + }, + { + name: "Stream have different upstream names", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream2", + }, + expected: false, + }, + { + name: "Stream upstreams have different server lengths", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + {Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + { + name: "Stream upstreams have different server contents", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(streamUpstreamsEqual(tt.upstreamA, tt.upstreamB)).To(Equal(tt.expected)) + }) + } +} + +func TestStructsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + structA *structpb.Struct + structB *structpb.Struct + name string + expected bool + }{ + { + name: "Structs are equal", + structA: &structpb.Struct{ + 
Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + expected: true, + }, + { + name: "Structs have different values", + structA: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "different"}}}, + }, + expected: false, + }, + { + name: "Structs have different keys", + structA: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key1": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(structsEqual(tt.structA, tt.structB)).To(Equal(tt.expected)) + }) + } +} + +func TestValuesEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + valueA *structpb.Value + valueB *structpb.Value + name string + expected bool + }{ + { + name: "Values are equal", + valueA: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + valueB: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + expected: true, + }, + { + name: "Values are not equal", + valueA: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + valueB: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "different"}}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(valuesEqual(tt.valueA, tt.valueB)).To(Equal(tt.expected)) + }) + } +} diff --git 
a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go new file mode 100644 index 0000000000..dbe49deb0c --- /dev/null +++ b/internal/mode/static/nginx/agent/agent.go @@ -0,0 +1,247 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "sort" + "time" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/protobuf/types/known/structpb" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +const retryUpstreamTimeout = 5 * time.Second + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . NginxUpdater + +// NginxUpdater is an interface for updating NGINX using the NGINX agent. +type NginxUpdater interface { + UpdateConfig(deployment *Deployment, files []File) + UpdateUpstreamServers(deployment *Deployment, conf dataplane.Configuration) +} + +// NginxUpdaterImpl implements the NginxUpdater interface. +type NginxUpdaterImpl struct { + CommandService *commandService + FileService *fileService + NginxDeployments *DeploymentStore + logger logr.Logger + plus bool + retryTimeout time.Duration +} + +// NewNginxUpdater returns a new NginxUpdaterImpl instance. 
+func NewNginxUpdater( + logger logr.Logger, + reader client.Reader, + statusQueue *status.Queue, + resetConnChan <-chan struct{}, + plus bool, +) *NginxUpdaterImpl { + connTracker := agentgrpc.NewConnectionsTracker() + nginxDeployments := NewDeploymentStore(connTracker) + + commandService := newCommandService( + logger.WithName("commandService"), + reader, + nginxDeployments, + connTracker, + statusQueue, + resetConnChan, + ) + fileService := newFileService(logger.WithName("fileService"), nginxDeployments, connTracker) + + return &NginxUpdaterImpl{ + logger: logger, + plus: plus, + NginxDeployments: nginxDeployments, + CommandService: commandService, + FileService: fileService, + retryTimeout: retryUpstreamTimeout, + } +} + +// UpdateConfig sends the nginx configuration to the agent. +// +// The flow of events is as follows: +// - Set the configuration files on the deployment. +// - Broadcast the message containing file metadata to all pods (subscriptions) for the deployment. +// - Agent receives a ConfigApplyRequest with the list of file metadata. +// - Agent calls GetFile for each file in the list, which we send back to the agent. +// - Agent updates nginx, and responds with a DataPlaneResponse. +// - Subscriber responds back to the broadcaster to inform that the transaction is complete. +// - If any errors occurred, they are set on the deployment for the handler to use in the status update. +func (n *NginxUpdaterImpl) UpdateConfig( + deployment *Deployment, + files []File, +) { + msg := deployment.SetFiles(files) + if msg == nil { + return + } + + applied := deployment.GetBroadcaster().Send(*msg) + if applied { + n.logger.Info("Sent nginx configuration to agent") + } + + deployment.SetLatestConfigError(deployment.GetConfigurationStatus()) +} + +// UpdateUpstreamServers sends an APIRequest to the agent to update upstream servers using the NGINX Plus API. +// Only applicable when using NGINX Plus. 
+func (n *NginxUpdaterImpl) UpdateUpstreamServers( + deployment *Deployment, + conf dataplane.Configuration, +) { + if !n.plus { + return + } + + broadcaster := deployment.GetBroadcaster() + + // reset the latest error to nil now that we're applying new config + deployment.SetLatestUpstreamError(nil) + + var errs []error + var applied bool + actions := make([]*pb.NGINXPlusAction, 0, len(conf.Upstreams)+len(conf.StreamUpstreams)) + for _, upstream := range conf.Upstreams { + action := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: buildHTTPUpstreamServers(upstream), + }, + } + actions = append(actions, action) + } + + for _, upstream := range conf.StreamUpstreams { + action := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: buildStreamUpstreamServers(upstream), + }, + } + actions = append(actions, action) + } + + if actionsEqual(deployment.GetNGINXPlusActions(), actions) { + return + } + + for _, action := range actions { + msg := broadcast.NginxAgentMessage{ + Type: broadcast.APIRequest, + NGINXPlusAction: action, + } + + requestApplied, err := n.sendRequest(broadcaster, msg, deployment) + if err != nil { + errs = append(errs, fmt.Errorf( + "couldn't update upstream via the API: %w", deployment.GetConfigurationStatus())) + } + applied = applied || requestApplied + } + + if len(errs) != 0 { + deployment.SetLatestUpstreamError(errors.Join(errs...)) + } else if applied { + n.logger.Info("Updated upstream servers using NGINX Plus API") + } + + // Store the most recent actions on the deployment so any new subscribers can apply them when first connecting. 
+ deployment.SetNGINXPlusActions(actions) +} + +func buildHTTPUpstreamServers(upstream dataplane.Upstream) *pb.UpdateHTTPUpstreamServers { + return &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: upstream.Name, + Servers: buildUpstreamServers(upstream), + } +} + +func buildStreamUpstreamServers(upstream dataplane.Upstream) *pb.UpdateStreamServers { + return &pb.UpdateStreamServers{ + UpstreamStreamName: upstream.Name, + Servers: buildUpstreamServers(upstream), + } +} + +func buildUpstreamServers(upstream dataplane.Upstream) []*structpb.Struct { + servers := make([]*structpb.Struct, 0, len(upstream.Endpoints)) + + for _, endpoint := range upstream.Endpoints { + port, format := getPortAndIPFormat(endpoint) + value := fmt.Sprintf(format, endpoint.Address, port) + + server := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue(value), + }, + } + + servers = append(servers, server) + } + + // sort the servers to avoid unnecessary reloads + sort.Slice(servers, func(i, j int) bool { + return servers[i].Fields["server"].GetStringValue() < servers[j].Fields["server"].GetStringValue() + }) + + return servers +} + +func (n *NginxUpdaterImpl) sendRequest( + broadcaster broadcast.Broadcaster, + msg broadcast.NginxAgentMessage, + deployment *Deployment, +) (bool, error) { + // retry the API update request because sometimes nginx isn't quite ready after the config apply reload + ctx, cancel := context.WithTimeout(context.Background(), n.retryTimeout) + defer cancel() + + var applied bool + if err := wait.PollUntilContextCancel( + ctx, + 500*time.Millisecond, + true, // poll immediately + func(_ context.Context) (bool, error) { + applied = broadcaster.Send(msg) + if statusErr := deployment.GetConfigurationStatus(); statusErr != nil { + return false, nil //nolint:nilerr // will get error once done polling + } + + return true, nil + }, + ); err != nil { + return applied, err + } + + return applied, nil +} + +func getPortAndIPFormat(ep 
resolver.Endpoint) (string, string) { + var port string + + if ep.Port != 0 { + port = fmt.Sprintf(":%d", ep.Port) + } + + format := "%s%s" + if ep.IPv6 { + format = "[%s]%s" + } + + return port, format +} diff --git a/internal/mode/static/nginx/agent/agent_test.go b/internal/mode/static/nginx/agent/agent_test.go new file mode 100644 index 0000000000..b0147d4d96 --- /dev/null +++ b/internal/mode/static/nginx/agent/agent_test.go @@ -0,0 +1,396 @@ +package agent + +import ( + "errors" + "fmt" + "testing" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/types/known/structpb" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +func TestUpdateConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + expErr bool + }{ + { + name: "success", + expErr: false, + }, + { + name: "error returned from agent", + expErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + fakeBroadcaster.SendReturns(true) + + plus := false + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, plus) + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + file := File{ + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + } + + testErr := errors.New("test error") + if test.expErr { + deployment.SetPodErrorStatus("pod1", testErr) + } + + updater.UpdateConfig(deployment, []File{file}) + + 
g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(1)) + g.Expect(deployment.GetFile(file.Meta.Name, file.Meta.Hash)).To(Equal(file.Contents)) + + if test.expErr { + g.Expect(deployment.GetLatestConfigError()).To(Equal(testErr)) + // ensure that the error is cleared after the next config is applied + deployment.SetPodErrorStatus("pod1", nil) + file.Meta.Hash = "5678" + updater.UpdateConfig(deployment, []File{file}) + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + } else { + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + } + }) + } +} + +func TestUpdateConfig_NoChange(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, false) + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + file := File{ + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + } + + // Set the initial files on the deployment + deployment.SetFiles([]File{file}) + + // Call UpdateConfig with the same files + updater.UpdateConfig(deployment, []File{file}) + + // Verify that no new configuration was sent + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) +} + +func TestUpdateUpstreamServers(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + buildUpstreams bool + plus bool + expErr bool + }{ + { + name: "success", + plus: true, + buildUpstreams: true, + expErr: false, + }, + { + name: "no upstreams to apply", + plus: true, + buildUpstreams: false, + expErr: false, + }, + { + name: "not running nginx plus", + plus: false, + expErr: false, + }, + { + name: "error returned from agent", + plus: true, + buildUpstreams: true, + expErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := 
&broadcastfakes.FakeBroadcaster{} + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, test.plus) + updater.retryTimeout = 0 + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + testErr := errors.New("test error") + if test.expErr { + deployment.SetPodErrorStatus("pod1", testErr) + } + + var conf dataplane.Configuration + if test.buildUpstreams { + conf = dataplane.Configuration{ + Upstreams: []dataplane.Upstream{ + { + Name: "test-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "1.2.3.4", + Port: 8080, + }, + }, + }, + }, + StreamUpstreams: []dataplane.Upstream{ + { + Name: "test-stream-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "5.6.7.8", + }, + }, + }, + }, + } + } + + updater.UpdateUpstreamServers(deployment, conf) + + expActions := make([]*pb.NGINXPlusAction, 0) + if test.buildUpstreams { + expActions = []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "test-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("1.2.3.4:8080"), + }, + }, + }, + }, + }, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "test-stream-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("5.6.7.8"), + }, + }, + }, + }, + }, + }, + } + } + + if !test.plus { + g.Expect(deployment.GetNGINXPlusActions()).To(BeNil()) + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) + } else if test.buildUpstreams { + g.Expect(deployment.GetNGINXPlusActions()).To(Equal(expActions)) + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(2)) + } + + if test.expErr { + expErr := errors.Join( + fmt.Errorf("couldn't update upstream via the API: %w", testErr), + 
fmt.Errorf("couldn't update upstream via the API: %w", testErr), + ) + + g.Expect(deployment.GetLatestUpstreamError()).To(Equal(expErr)) + // ensure that the error is cleared after the next config is applied + deployment.SetPodErrorStatus("pod1", nil) + updater.UpdateUpstreamServers(deployment, conf) + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) + } else { + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) + } + }) + } +} + +func TestUpdateUpstreamServers_NoChange(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, true) + updater.retryTimeout = 0 + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + conf := dataplane.Configuration{ + Upstreams: []dataplane.Upstream{ + { + Name: "test-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "1.2.3.4", + Port: 8080, + }, + }, + }, + }, + StreamUpstreams: []dataplane.Upstream{ + { + Name: "test-stream-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "5.6.7.8", + }, + }, + }, + }, + } + + initialActions := []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "test-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("1.2.3.4:8080"), + }, + }, + }, + }, + }, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "test-stream-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("5.6.7.8"), + }, + }, + }, + }, + }, + }, + } + deployment.SetNGINXPlusActions(initialActions) + + // Call UpdateUpstreamServers with the same configuration + 
updater.UpdateUpstreamServers(deployment, conf) + + // Verify that no new actions were sent + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) +} + +func TestGetPortAndIPFormat(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + expPort string + expFormat string + endpoint resolver.Endpoint + }{ + { + name: "IPv4 with port", + endpoint: resolver.Endpoint{ + Address: "1.2.3.4", + Port: 8080, + IPv6: false, + }, + expPort: ":8080", + expFormat: "%s%s", + }, + { + name: "IPv4 without port", + endpoint: resolver.Endpoint{ + Address: "1.2.3.4", + Port: 0, + IPv6: false, + }, + expPort: "", + expFormat: "%s%s", + }, + { + name: "IPv6 with port", + endpoint: resolver.Endpoint{ + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + Port: 8080, + IPv6: true, + }, + expPort: ":8080", + expFormat: "[%s]%s", + }, + { + name: "IPv6 without port", + endpoint: resolver.Endpoint{ + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + Port: 0, + IPv6: true, + }, + expPort: "", + expFormat: "[%s]%s", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + port, format := getPortAndIPFormat(test.endpoint) + g.Expect(port).To(Equal(test.expPort)) + g.Expect(format).To(Equal(test.expFormat)) + }) + } +} diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go b/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go new file mode 100644 index 0000000000..af87b10a8e --- /dev/null +++ b/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go @@ -0,0 +1,230 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package agentfakes + +import ( + "context" + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "k8s.io/apimachinery/pkg/types" +) + +type FakeDeploymentStorer struct { + GetStub func(types.NamespacedName) *agent.Deployment + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 types.NamespacedName + } + getReturns struct { + result1 *agent.Deployment + } + getReturnsOnCall map[int]struct { + result1 *agent.Deployment + } + GetOrStoreStub func(context.Context, types.NamespacedName, chan struct{}) *agent.Deployment + getOrStoreMutex sync.RWMutex + getOrStoreArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 chan struct{} + } + getOrStoreReturns struct { + result1 *agent.Deployment + } + getOrStoreReturnsOnCall map[int]struct { + result1 *agent.Deployment + } + RemoveStub func(types.NamespacedName) + removeMutex sync.RWMutex + removeArgsForCall []struct { + arg1 types.NamespacedName + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeDeploymentStorer) Get(arg1 types.NamespacedName) *agent.Deployment { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeDeploymentStorer) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *FakeDeploymentStorer) GetCalls(stub func(types.NamespacedName) *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *FakeDeploymentStorer) GetArgsForCall(i int) types.NamespacedName { + 
fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeDeploymentStorer) GetReturns(result1 *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetReturnsOnCall(i int, result1 *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 *agent.Deployment + }) + } + fake.getReturnsOnCall[i] = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetOrStore(arg1 context.Context, arg2 types.NamespacedName, arg3 chan struct{}) *agent.Deployment { + fake.getOrStoreMutex.Lock() + ret, specificReturn := fake.getOrStoreReturnsOnCall[len(fake.getOrStoreArgsForCall)] + fake.getOrStoreArgsForCall = append(fake.getOrStoreArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 chan struct{} + }{arg1, arg2, arg3}) + stub := fake.GetOrStoreStub + fakeReturns := fake.getOrStoreReturns + fake.recordInvocation("GetOrStore", []interface{}{arg1, arg2, arg3}) + fake.getOrStoreMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeDeploymentStorer) GetOrStoreCallCount() int { + fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + return len(fake.getOrStoreArgsForCall) +} + +func (fake *FakeDeploymentStorer) GetOrStoreCalls(stub func(context.Context, types.NamespacedName, chan struct{}) *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = stub +} + +func (fake *FakeDeploymentStorer) GetOrStoreArgsForCall(i int) (context.Context, types.NamespacedName, chan struct{}) { + 
fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + argsForCall := fake.getOrStoreArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeDeploymentStorer) GetOrStoreReturns(result1 *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = nil + fake.getOrStoreReturns = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetOrStoreReturnsOnCall(i int, result1 *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = nil + if fake.getOrStoreReturnsOnCall == nil { + fake.getOrStoreReturnsOnCall = make(map[int]struct { + result1 *agent.Deployment + }) + } + fake.getOrStoreReturnsOnCall[i] = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) Remove(arg1 types.NamespacedName) { + fake.removeMutex.Lock() + fake.removeArgsForCall = append(fake.removeArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.RemoveStub + fake.recordInvocation("Remove", []interface{}{arg1}) + fake.removeMutex.Unlock() + if stub != nil { + fake.RemoveStub(arg1) + } +} + +func (fake *FakeDeploymentStorer) RemoveCallCount() int { + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + return len(fake.removeArgsForCall) +} + +func (fake *FakeDeploymentStorer) RemoveCalls(stub func(types.NamespacedName)) { + fake.removeMutex.Lock() + defer fake.removeMutex.Unlock() + fake.RemoveStub = stub +} + +func (fake *FakeDeploymentStorer) RemoveArgsForCall(i int) types.NamespacedName { + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + argsForCall := fake.removeArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeDeploymentStorer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + 
fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeDeploymentStorer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ agent.DeploymentStorer = new(FakeDeploymentStorer) diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go new file mode 100644 index 0000000000..f69009ce04 --- /dev/null +++ b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go @@ -0,0 +1,125 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package agentfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" +) + +type FakeNginxUpdater struct { + UpdateConfigStub func(*agent.Deployment, []agent.File) + updateConfigMutex sync.RWMutex + updateConfigArgsForCall []struct { + arg1 *agent.Deployment + arg2 []agent.File + } + UpdateUpstreamServersStub func(*agent.Deployment, dataplane.Configuration) + updateUpstreamServersMutex sync.RWMutex + updateUpstreamServersArgsForCall []struct { + arg1 *agent.Deployment + arg2 dataplane.Configuration + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeNginxUpdater) UpdateConfig(arg1 *agent.Deployment, arg2 []agent.File) { + var arg2Copy []agent.File + if arg2 != nil { + arg2Copy = make([]agent.File, len(arg2)) + copy(arg2Copy, arg2) + } + fake.updateConfigMutex.Lock() + fake.updateConfigArgsForCall = append(fake.updateConfigArgsForCall, struct { + arg1 *agent.Deployment + arg2 []agent.File + }{arg1, arg2Copy}) + stub := fake.UpdateConfigStub + fake.recordInvocation("UpdateConfig", []interface{}{arg1, arg2Copy}) + fake.updateConfigMutex.Unlock() + if stub != nil { + fake.UpdateConfigStub(arg1, arg2) + } +} + +func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + return len(fake.updateConfigArgsForCall) +} + +func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(*agent.Deployment, []agent.File)) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = stub +} + +func (fake *FakeNginxUpdater) UpdateConfigArgsForCall(i int) (*agent.Deployment, []agent.File) { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + argsForCall := fake.updateConfigArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNginxUpdater) 
UpdateUpstreamServers(arg1 *agent.Deployment, arg2 dataplane.Configuration) { + fake.updateUpstreamServersMutex.Lock() + fake.updateUpstreamServersArgsForCall = append(fake.updateUpstreamServersArgsForCall, struct { + arg1 *agent.Deployment + arg2 dataplane.Configuration + }{arg1, arg2}) + stub := fake.UpdateUpstreamServersStub + fake.recordInvocation("UpdateUpstreamServers", []interface{}{arg1, arg2}) + fake.updateUpstreamServersMutex.Unlock() + if stub != nil { + fake.UpdateUpstreamServersStub(arg1, arg2) + } +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + return len(fake.updateUpstreamServersArgsForCall) +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func(*agent.Deployment, dataplane.Configuration)) { + fake.updateUpstreamServersMutex.Lock() + defer fake.updateUpstreamServersMutex.Unlock() + fake.UpdateUpstreamServersStub = stub +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersArgsForCall(i int) (*agent.Deployment, dataplane.Configuration) { + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + argsForCall := fake.updateUpstreamServersArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNginxUpdater) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeNginxUpdater) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = 
map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ agent.NginxUpdater = new(FakeNginxUpdater) diff --git a/internal/mode/static/nginx/agent/broadcast/broadcast.go b/internal/mode/static/nginx/agent/broadcast/broadcast.go new file mode 100644 index 0000000000..ddc0854b3d --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcast.go @@ -0,0 +1,159 @@ +package broadcast + +import ( + "context" + "sync" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "k8s.io/apimachinery/pkg/util/uuid" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . Broadcaster + +// Broadcaster defines an interface for consumers to subscribe to File updates. +type Broadcaster interface { + Subscribe() SubscriberChannels + Send(NginxAgentMessage) bool + CancelSubscription(string) +} + +// SubscriberChannels are the channels sent to the subscriber to listen and respond on. +// The ID is used for map lookup to delete a subscriber when it's gone. +type SubscriberChannels struct { + ListenCh <-chan NginxAgentMessage + ResponseCh chan<- struct{} + ID string +} + +// storedChannels are the same channels used in the SubscriberChannels, but reverse direction. +// These are used to store the channels for the broadcaster to send and listen on, +// and can be looked up in the map using the same ID. +type storedChannels struct { + listenCh chan<- NginxAgentMessage + responseCh <-chan struct{} + id string +} + +// DeploymentBroadcaster sends out a signal when an nginx Deployment has updated +// configuration files. The signal is received by any agent Subscription that cares +// about this Deployment. The agent Subscription will then send a response of whether or not +// the configuration was successfully applied. 
+type DeploymentBroadcaster struct { + publishCh chan NginxAgentMessage + subCh chan storedChannels + unsubCh chan string + listeners map[string]storedChannels + doneCh chan struct{} +} + +// NewDeploymentBroadcaster returns a new instance of a DeploymentBroadcaster. +func NewDeploymentBroadcaster(ctx context.Context, stopCh chan struct{}) *DeploymentBroadcaster { + broadcaster := &DeploymentBroadcaster{ + listeners: make(map[string]storedChannels), + publishCh: make(chan NginxAgentMessage), + subCh: make(chan storedChannels), + unsubCh: make(chan string), + doneCh: make(chan struct{}), + } + go broadcaster.run(ctx, stopCh) + + return broadcaster +} + +// Subscribe allows a listener to subscribe to broadcast messages. It returns the channel +// to listen on for messages, as well as a channel to respond on. +func (b *DeploymentBroadcaster) Subscribe() SubscriberChannels { + listenCh := make(chan NginxAgentMessage) + responseCh := make(chan struct{}) + id := string(uuid.NewUUID()) + + subscriberChans := SubscriberChannels{ + ID: id, + ListenCh: listenCh, + ResponseCh: responseCh, + } + storedChans := storedChannels{ + id: id, + listenCh: listenCh, + responseCh: responseCh, + } + + b.subCh <- storedChans + return subscriberChans +} + +// Send the message to all listeners. Wait for all listeners to respond. +// Returns true if there were listeners that received the message. +func (b *DeploymentBroadcaster) Send(message NginxAgentMessage) bool { + b.publishCh <- message + <-b.doneCh + + return len(b.listeners) > 0 +} + +// CancelSubscription removes a Subscriber from the channel list. +func (b *DeploymentBroadcaster) CancelSubscription(id string) { + b.unsubCh <- id +} + +// run starts the broadcaster loop. It handles the following events: +// - if stopCh is closed, return. +// - if receiving a new subscriber, add it to the subscriber list. +// - if receiving a canceled subscription, remove it from the subscriber list. 
+// - if receiving a message to publish, send it to all subscribers. +func (b *DeploymentBroadcaster) run(ctx context.Context, stopCh chan struct{}) { + for { + select { + case <-stopCh: + return + case <-ctx.Done(): + return + case channels := <-b.subCh: + b.listeners[channels.id] = channels + case id := <-b.unsubCh: + delete(b.listeners, id) + case msg := <-b.publishCh: + var wg sync.WaitGroup + wg.Add(len(b.listeners)) + + for _, channels := range b.listeners { + go func() { + defer wg.Done() + + // send message and wait for it to be read + channels.listenCh <- msg + // wait for response + <-channels.responseCh + }() + } + wg.Wait() + + b.doneCh <- struct{}{} + } + } +} + +// MessageType is the type of message to be sent. +type MessageType int + +const ( + // ConfigApplyRequest sends files to update nginx configuration. + ConfigApplyRequest MessageType = iota + // APIRequest sends an NGINX Plus API request to update configuration. + APIRequest +) + +// NginxAgentMessage is sent to all subscribers to send to the nginx agents for either a ConfigApplyRequest +// or an APIActionRequest. +type NginxAgentMessage struct { + // ConfigVersion is the hashed configuration version of the included files. + ConfigVersion string + // NGINXPlusAction is an NGINX Plus API action to be sent. + NGINXPlusAction *pb.NGINXPlusAction + // FileOverviews contain the overviews of all files to be sent. + FileOverviews []*pb.File + // Type defines the type of message to be sent. + Type MessageType +} diff --git a/internal/mode/static/nginx/agent/broadcast/broadcast_test.go b/internal/mode/static/nginx/agent/broadcast/broadcast_test.go new file mode 100644 index 0000000000..950293c4e1 --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcast_test.go @@ -0,0 +1,108 @@ +package broadcast_test + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" +) + +func TestSubscribe(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber := broadcaster.Subscribe() + g.Expect(subscriber.ID).NotTo(BeEmpty()) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeTrue()) + }() + + g.Eventually(subscriber.ListenCh).Should(Receive(Equal(message))) +} + +func TestSubscribe_MultipleListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber1 := broadcaster.Subscribe() + subscriber2 := broadcaster.Subscribe() + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeTrue()) + }() + + g.Eventually(subscriber1.ListenCh).Should(Receive(Equal(message))) + g.Eventually(subscriber2.ListenCh).Should(Receive(Equal(message))) + + subscriber1.ResponseCh <- struct{}{} + subscriber2.ResponseCh <- struct{}{} +} + +func TestSubscribe_NoListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + result := broadcaster.Send(message) + g.Expect(result).To(BeFalse()) +} + +func TestCancelSubscription(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := 
broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber := broadcaster.Subscribe() + + broadcaster.CancelSubscription(subscriber.ID) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeFalse()) + }() + + g.Consistently(subscriber.ListenCh).ShouldNot(Receive()) +} diff --git a/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go b/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go new file mode 100644 index 0000000000..0d820ef98a --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go @@ -0,0 +1,215 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package broadcastfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" +) + +type FakeBroadcaster struct { + CancelSubscriptionStub func(string) + cancelSubscriptionMutex sync.RWMutex + cancelSubscriptionArgsForCall []struct { + arg1 string + } + SendStub func(broadcast.NginxAgentMessage) bool + sendMutex sync.RWMutex + sendArgsForCall []struct { + arg1 broadcast.NginxAgentMessage + } + sendReturns struct { + result1 bool + } + sendReturnsOnCall map[int]struct { + result1 bool + } + SubscribeStub func() broadcast.SubscriberChannels + subscribeMutex sync.RWMutex + subscribeArgsForCall []struct { + } + subscribeReturns struct { + result1 broadcast.SubscriberChannels + } + subscribeReturnsOnCall map[int]struct { + result1 broadcast.SubscriberChannels + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeBroadcaster) CancelSubscription(arg1 string) { + fake.cancelSubscriptionMutex.Lock() + fake.cancelSubscriptionArgsForCall = append(fake.cancelSubscriptionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.CancelSubscriptionStub + 
fake.recordInvocation("CancelSubscription", []interface{}{arg1}) + fake.cancelSubscriptionMutex.Unlock() + if stub != nil { + fake.CancelSubscriptionStub(arg1) + } +} + +func (fake *FakeBroadcaster) CancelSubscriptionCallCount() int { + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + return len(fake.cancelSubscriptionArgsForCall) +} + +func (fake *FakeBroadcaster) CancelSubscriptionCalls(stub func(string)) { + fake.cancelSubscriptionMutex.Lock() + defer fake.cancelSubscriptionMutex.Unlock() + fake.CancelSubscriptionStub = stub +} + +func (fake *FakeBroadcaster) CancelSubscriptionArgsForCall(i int) string { + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + argsForCall := fake.cancelSubscriptionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBroadcaster) Send(arg1 broadcast.NginxAgentMessage) bool { + fake.sendMutex.Lock() + ret, specificReturn := fake.sendReturnsOnCall[len(fake.sendArgsForCall)] + fake.sendArgsForCall = append(fake.sendArgsForCall, struct { + arg1 broadcast.NginxAgentMessage + }{arg1}) + stub := fake.SendStub + fakeReturns := fake.sendReturns + fake.recordInvocation("Send", []interface{}{arg1}) + fake.sendMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBroadcaster) SendCallCount() int { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + return len(fake.sendArgsForCall) +} + +func (fake *FakeBroadcaster) SendCalls(stub func(broadcast.NginxAgentMessage) bool) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = stub +} + +func (fake *FakeBroadcaster) SendArgsForCall(i int) broadcast.NginxAgentMessage { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + argsForCall := fake.sendArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBroadcaster) SendReturns(result1 bool) { + fake.sendMutex.Lock() + defer 
fake.sendMutex.Unlock() + fake.SendStub = nil + fake.sendReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeBroadcaster) SendReturnsOnCall(i int, result1 bool) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + if fake.sendReturnsOnCall == nil { + fake.sendReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.sendReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *FakeBroadcaster) Subscribe() broadcast.SubscriberChannels { + fake.subscribeMutex.Lock() + ret, specificReturn := fake.subscribeReturnsOnCall[len(fake.subscribeArgsForCall)] + fake.subscribeArgsForCall = append(fake.subscribeArgsForCall, struct { + }{}) + stub := fake.SubscribeStub + fakeReturns := fake.subscribeReturns + fake.recordInvocation("Subscribe", []interface{}{}) + fake.subscribeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBroadcaster) SubscribeCallCount() int { + fake.subscribeMutex.RLock() + defer fake.subscribeMutex.RUnlock() + return len(fake.subscribeArgsForCall) +} + +func (fake *FakeBroadcaster) SubscribeCalls(stub func() broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = stub +} + +func (fake *FakeBroadcaster) SubscribeReturns(result1 broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = nil + fake.subscribeReturns = struct { + result1 broadcast.SubscriberChannels + }{result1} +} + +func (fake *FakeBroadcaster) SubscribeReturnsOnCall(i int, result1 broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = nil + if fake.subscribeReturnsOnCall == nil { + fake.subscribeReturnsOnCall = make(map[int]struct { + result1 broadcast.SubscriberChannels + }) + } + fake.subscribeReturnsOnCall[i] = struct { + 
result1 broadcast.SubscriberChannels + }{result1} +} + +func (fake *FakeBroadcaster) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + fake.subscribeMutex.RLock() + defer fake.subscribeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeBroadcaster) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ broadcast.Broadcaster = new(FakeBroadcaster) diff --git a/internal/mode/static/nginx/agent/broadcast/doc.go b/internal/mode/static/nginx/agent/broadcast/doc.go new file mode 100644 index 0000000000..3640dcfa5e --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/doc.go @@ -0,0 +1,5 @@ +/* +Package broadcast contains the functions for creating a broadcaster to send updates to consumers. +It is used to send nginx configuration for an nginx Deployment to all pod subscribers for that Deployment. 
+*/ +package broadcast diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go new file mode 100644 index 0000000000..8f694e581d --- /dev/null +++ b/internal/mode/static/nginx/agent/command.go @@ -0,0 +1,530 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcStatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +const connectionWaitTimeout = 30 * time.Second + +// commandService handles the connection and subscription to the data plane agent. 
+type commandService struct { + pb.CommandServiceServer + nginxDeployments *DeploymentStore + statusQueue *status.Queue + resetConnChan <-chan struct{} + connTracker agentgrpc.ConnectionsTracker + k8sReader client.Reader + logger logr.Logger + connectionTimeout time.Duration +} + +func newCommandService( + logger logr.Logger, + reader client.Reader, + depStore *DeploymentStore, + connTracker agentgrpc.ConnectionsTracker, + statusQueue *status.Queue, + resetConnChan <-chan struct{}, +) *commandService { + return &commandService{ + connectionTimeout: connectionWaitTimeout, + k8sReader: reader, + logger: logger, + connTracker: connTracker, + nginxDeployments: depStore, + statusQueue: statusQueue, + resetConnChan: resetConnChan, + } +} + +func (cs *commandService) Register(server *grpc.Server) { + pb.RegisterCommandServiceServer(server, cs) +} + +// CreateConnection registers a data plane agent with the control plane. +// The nginx InstanceID could be empty if the agent hasn't discovered its nginx instance yet. +// Once discovered, the agent will send an UpdateDataPlaneStatus request with the nginx InstanceID set. 
+func (cs *commandService) CreateConnection( + ctx context.Context, + req *pb.CreateConnectionRequest, +) (*pb.CreateConnectionResponse, error) { + if req == nil { + return nil, errors.New("empty connection request") + } + + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + resource := req.GetResource() + podName := resource.GetContainerInfo().GetHostname() + cs.logger.Info(fmt.Sprintf("Creating connection for nginx pod: %s", podName)) + + owner, err := cs.getPodOwner(podName) + if err != nil { + response := &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_ERROR, + Message: "error getting pod owner", + Error: err.Error(), + }, + } + cs.logger.Error(err, "error getting pod owner") + return response, grpcStatus.Errorf(codes.Internal, "error getting pod owner %s", err.Error()) + } + + conn := agentgrpc.Connection{ + Parent: owner, + PodName: podName, + InstanceID: getNginxInstanceID(resource.GetInstances()), + } + cs.connTracker.Track(gi.IPAddress, conn) + + return &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + }, nil +} + +// Subscribe is a decoupled communication mechanism between the data plane agent and control plane. +// The series of events are as follows: +// - Wait for the agent to register its nginx instance with the control plane. +// - Grab the most recent deployment configuration for itself, and attempt to apply it. +// - Subscribe to any future updates from the NginxUpdater and start a loop to listen for those updates. +// If any connection or unrecoverable errors occur, return and agent should re-establish a subscription. +// If errors occur with applying the config, log and put those errors into the status queue to be written +// to the Gateway status. 
+// +//nolint:gocyclo // could be room for improvement here +func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error { + ctx := in.Context() + + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return agentgrpc.ErrStatusInvalidConnection + } + defer cs.connTracker.RemoveConnection(gi.IPAddress) + + // wait for the agent to report itself and nginx + conn, deployment, err := cs.waitForConnection(ctx, gi) + if err != nil { + cs.logger.Error(err, "error waiting for connection") + return err + } + defer deployment.RemovePodStatus(conn.PodName) + + cs.logger.Info(fmt.Sprintf("Successfully connected to nginx agent %s", conn.PodName)) + + msgr := messenger.New(in) + go msgr.Run(ctx) + + // apply current config before starting event loop + if err := cs.setInitialConfig(ctx, deployment, conn, msgr); err != nil { + return err + } + + // subscribe to the deployment broadcaster to get file updates + broadcaster := deployment.GetBroadcaster() + channels := broadcaster.Subscribe() + defer broadcaster.CancelSubscription(channels.ID) + + for { + // When a message is received over the ListenCh, it is assumed and required that the + // deployment object is already LOCKED. This lock is acquired by the event handler before calling + // `updateNginxConfig`. The entire transaction (as described in above in the function comment) + // must be locked to prevent the deployment files from changing during the transaction. + // This means that the lock is held until we receive either an error or response from agent + // (via msgr.Errors() or msgr.Messages()) and respond back, finally returning to the event handler + // which releases the lock. 
+ select { + case <-ctx.Done(): + select { + case channels.ResponseCh <- struct{}{}: + default: + } + return grpcStatus.Error(codes.Canceled, context.Cause(ctx).Error()) + case <-cs.resetConnChan: + return grpcStatus.Error(codes.Unavailable, "TLS files updated") + case msg := <-channels.ListenCh: + var req *pb.ManagementPlaneRequest + switch msg.Type { + case broadcast.ConfigApplyRequest: + req = buildRequest(msg.FileOverviews, conn.InstanceID, msg.ConfigVersion) + case broadcast.APIRequest: + req = buildPlusAPIRequest(msg.NGINXPlusAction, conn.InstanceID) + default: + panic(fmt.Sprintf("unknown request type %d", msg.Type)) + } + + cs.logger.V(1).Info("Sending configuration to agent", "requestType", msg.Type) + if err := msgr.Send(ctx, req); err != nil { + cs.logger.Error(err, "error sending request to agent") + deployment.SetPodErrorStatus(conn.PodName, err) + channels.ResponseCh <- struct{}{} + + return grpcStatus.Error(codes.Internal, err.Error()) + } + case err = <-msgr.Errors(): + cs.logger.Error(err, "connection error", "pod", conn.PodName) + deployment.SetPodErrorStatus(conn.PodName, err) + select { + case channels.ResponseCh <- struct{}{}: + default: + } + + if errors.Is(err, io.EOF) { + return grpcStatus.Error(codes.Aborted, err.Error()) + } + return grpcStatus.Error(codes.Internal, err.Error()) + case msg := <-msgr.Messages(): + res := msg.GetCommandResponse() + if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { + if isRollbackMessage(res.GetMessage()) { + // we don't care about these messages, so ignore them + continue + } + err := fmt.Errorf("msg: %s; error: %s", res.GetMessage(), res.GetError()) + deployment.SetPodErrorStatus(conn.PodName, err) + } else { + deployment.SetPodErrorStatus(conn.PodName, nil) + } + channels.ResponseCh <- struct{}{} + } + } +} + +func (cs *commandService) waitForConnection( + ctx context.Context, + gi grpcContext.GrpcInfo, +) (*agentgrpc.Connection, *Deployment, error) { + ticker := time.NewTicker(time.Second) + 
defer ticker.Stop() + + timer := time.NewTimer(cs.connectionTimeout) + defer timer.Stop() + + agentConnectErr := errors.New("timed out waiting for agent to register nginx") + deploymentStoreErr := errors.New("timed out waiting for nginx deployment to be added to store") + + var err error + for { + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-timer.C: + return nil, nil, err + case <-ticker.C: + if conn := cs.connTracker.GetConnection(gi.IPAddress); conn.Ready() { + // connection has been established, now ensure that the deployment exists in the store + if deployment := cs.nginxDeployments.Get(conn.Parent); deployment != nil { + return &conn, deployment, nil + } + err = deploymentStoreErr + continue + } + err = agentConnectErr + } + } +} + +// setInitialConfig gets the initial configuration for this connection and applies it. +func (cs *commandService) setInitialConfig( + ctx context.Context, + deployment *Deployment, + conn *agentgrpc.Connection, + msgr messenger.Messenger, +) error { + deployment.FileLock.Lock() + defer deployment.FileLock.Unlock() + + fileOverviews, configVersion := deployment.GetFileOverviews() + if err := msgr.Send(ctx, buildRequest(fileOverviews, conn.InstanceID, configVersion)); err != nil { + cs.logAndSendErrorStatus(deployment, conn, err) + + return grpcStatus.Error(codes.Internal, err.Error()) + } + + applyErr, connErr := cs.waitForInitialConfigApply(ctx, msgr) + if connErr != nil { + cs.logger.Error(connErr, "error setting initial configuration") + + return connErr + } + + errs := []error{applyErr} + for _, action := range deployment.GetNGINXPlusActions() { + // retry the API update request because sometimes nginx isn't quite ready after the config apply reload + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + var overallUpstreamApplyErr error + + if err := wait.PollUntilContextCancel( + timeoutCtx, + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + if 
err := msgr.Send(ctx, buildPlusAPIRequest(action, conn.InstanceID)); err != nil { + cs.logAndSendErrorStatus(deployment, conn, err) + + return false, grpcStatus.Error(codes.Internal, err.Error()) + } + + upstreamApplyErr, connErr := cs.waitForInitialConfigApply(ctx, msgr) + if connErr != nil { + cs.logger.Error(connErr, "error setting initial configuration") + + return false, connErr + } + + if upstreamApplyErr != nil { + overallUpstreamApplyErr = errors.Join(overallUpstreamApplyErr, upstreamApplyErr) + return false, nil + } + return true, nil + }, + ); err != nil { + if overallUpstreamApplyErr != nil { + errs = append(errs, overallUpstreamApplyErr) + } else { + cancel() + return err + } + } + cancel() + } + // send the status (error or nil) to the status queue + cs.logAndSendErrorStatus(deployment, conn, errors.Join(errs...)) + + return nil +} + +// waitForInitialConfigApply waits for the nginx agent to respond after a Subscriber attempts +// to apply its initial config. +// Two errors are returned +// - applyErr is an error applying the configuration +// - connectionErr is an error with the connection or sending the configuration +// The caller treats a connectionErr as unrecoverable, while the applyErr is used +// to set the status on the Gateway resources. 
+func (cs *commandService) waitForInitialConfigApply( + ctx context.Context, + msgr messenger.Messenger, +) (applyErr error, connectionErr error) { + for { + select { + case <-ctx.Done(): + return nil, grpcStatus.Error(codes.Canceled, context.Cause(ctx).Error()) + case err := <-msgr.Errors(): + if errors.Is(err, io.EOF) { + return nil, grpcStatus.Error(codes.Aborted, err.Error()) + } + return nil, grpcStatus.Error(codes.Internal, err.Error()) + case msg := <-msgr.Messages(): + res := msg.GetCommandResponse() + if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { + applyErr := fmt.Errorf("msg: %s; error: %s", res.GetMessage(), res.GetError()) + return applyErr, nil + } + + return applyErr, connectionErr + } + } +} + +// logAndSendErrorStatus logs an error, sets it on the Deployment object for that Pod, and then sends +// the full Deployment error status to the status queue. This ensures that any other Pod errors that already +// exist on the Deployment are not overwritten. +// If the error is nil, then we just enqueue the nil value and don't log it, which indicates success. 
+func (cs *commandService) logAndSendErrorStatus(deployment *Deployment, conn *agentgrpc.Connection, err error) { + if err != nil { + cs.logger.Error(err, "error sending request to agent") + } else { + cs.logger.Info("Successfully configured nginx for new subscription", "pod", conn.PodName) + } + deployment.SetPodErrorStatus(conn.PodName, err) + + queueObj := &status.QueueObject{ + Deployment: conn.Parent, + Error: deployment.GetConfigurationStatus(), + UpdateType: status.UpdateAll, + } + cs.statusQueue.Enqueue(queueObj) +} + +func buildRequest(fileOverviews []*pb.File, instanceID, version string) *pb.ManagementPlaneRequest { + return &pb.ManagementPlaneRequest{ + MessageMeta: &pb.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: uuid.NewString(), + Timestamp: timestamppb.Now(), + }, + Request: &pb.ManagementPlaneRequest_ConfigApplyRequest{ + ConfigApplyRequest: &pb.ConfigApplyRequest{ + Overview: &pb.FileOverview{ + Files: fileOverviews, + ConfigVersion: &pb.ConfigVersion{ + InstanceId: instanceID, + Version: version, + }, + }, + }, + }, + } +} + +func isRollbackMessage(msg string) bool { + msgToLower := strings.ToLower(msg) + return strings.Contains(msgToLower, "rollback successful") || + strings.Contains(msgToLower, "rollback failed") +} + +func buildPlusAPIRequest(action *pb.NGINXPlusAction, instanceID string) *pb.ManagementPlaneRequest { + return &pb.ManagementPlaneRequest{ + MessageMeta: &pb.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: uuid.NewString(), + Timestamp: timestamppb.Now(), + }, + Request: &pb.ManagementPlaneRequest_ActionRequest{ + ActionRequest: &pb.APIActionRequest{ + InstanceId: instanceID, + Action: &pb.APIActionRequest_NginxPlusAction{ + NginxPlusAction: action, + }, + }, + }, + } +} + +func (cs *commandService) getPodOwner(podName string) (types.NamespacedName, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + var pods v1.PodList + listOpts := 
&client.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": podName}), + } + if err := cs.k8sReader.List(ctx, &pods, listOpts); err != nil { + return types.NamespacedName{}, fmt.Errorf("error listing pods: %w", err) + } + + if len(pods.Items) == 0 { + return types.NamespacedName{}, fmt.Errorf("no pods found with name %q", podName) + } + + if len(pods.Items) > 1 { + return types.NamespacedName{}, fmt.Errorf("should only be one pod with name %q", podName) + } + pod := pods.Items[0] + + podOwnerRefs := pod.GetOwnerReferences() + if len(podOwnerRefs) != 1 { + return types.NamespacedName{}, fmt.Errorf("expected one owner reference of the nginx Pod, got %d", len(podOwnerRefs)) + } + + if podOwnerRefs[0].Kind != "ReplicaSet" { + err := fmt.Errorf("expected pod owner reference to be ReplicaSet, got %s", podOwnerRefs[0].Kind) + return types.NamespacedName{}, err + } + + var replicaSet appsv1.ReplicaSet + var replicaSetErr error + if err := wait.PollUntilContextCancel( + ctx, + 500*time.Millisecond, + true, /* poll immediately */ + func(ctx context.Context) (bool, error) { + if err := cs.k8sReader.Get( + ctx, + types.NamespacedName{Namespace: pod.Namespace, Name: podOwnerRefs[0].Name}, + &replicaSet, + ); err != nil { + replicaSetErr = err + return false, nil //nolint:nilerr // error is returned at the end + } + + return true, nil + }, + ); err != nil { + return types.NamespacedName{}, fmt.Errorf("failed to get nginx Pod's ReplicaSet: %w", replicaSetErr) + } + + replicaOwnerRefs := replicaSet.GetOwnerReferences() + if len(replicaOwnerRefs) != 1 { + err := fmt.Errorf("expected one owner reference of the nginx ReplicaSet, got %d", len(replicaOwnerRefs)) + return types.NamespacedName{}, err + } + + return types.NamespacedName{Namespace: pod.Namespace, Name: replicaOwnerRefs[0].Name}, nil +} + +// UpdateDataPlaneStatus is called by agent on startup and upon any change in agent metadata, +// instance metadata, or configurations. 
InstanceID may not be set on an initial CreateConnection, +// and will instead be set on a call to UpdateDataPlaneStatus once the agent discovers its nginx instance. +func (cs *commandService) UpdateDataPlaneStatus( + ctx context.Context, + req *pb.UpdateDataPlaneStatusRequest, +) (*pb.UpdateDataPlaneStatusResponse, error) { + if req == nil { + return nil, errors.New("empty UpdateDataPlaneStatus request") + } + + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + instanceID := getNginxInstanceID(req.GetResource().GetInstances()) + if instanceID == "" { + return nil, grpcStatus.Errorf(codes.InvalidArgument, "request does not contain nginx instanceID") + } + + cs.connTracker.SetInstanceID(gi.IPAddress, instanceID) + + return &pb.UpdateDataPlaneStatusResponse{}, nil +} + +func getNginxInstanceID(instances []*pb.Instance) string { + for _, instance := range instances { + instanceType := instance.GetInstanceMeta().GetInstanceType() + if instanceType == pb.InstanceMeta_INSTANCE_TYPE_NGINX || + instanceType == pb.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS { + return instance.GetInstanceMeta().GetInstanceId() + } + } + + return "" +} + +// UpdateDataPlaneHealth includes full health information about the data plane as reported by the agent. +func (cs *commandService) UpdateDataPlaneHealth( + _ context.Context, + _ *pb.UpdateDataPlaneHealthRequest, +) (*pb.UpdateDataPlaneHealthResponse, error) { + return &pb.UpdateDataPlaneHealthResponse{}, nil +} diff --git a/internal/mode/static/nginx/agent/command_test.go b/internal/mode/static/nginx/agent/command_test.go new file mode 100644 index 0000000000..167aab860a --- /dev/null +++ b/internal/mode/static/nginx/agent/command_test.go @@ -0,0 +1,990 @@ +package agent + +import ( + "context" + "errors" + "io" + "testing" + "time" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . 
"github.com/onsi/gomega" + "google.golang.org/grpc" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +type mockSubscribeServer struct { + grpc.ServerStream + ctx context.Context + recvChan chan *pb.DataPlaneResponse + sendChan chan *pb.ManagementPlaneRequest +} + +func newMockSubscribeServer(ctx context.Context) *mockSubscribeServer { + return &mockSubscribeServer{ + ctx: ctx, + recvChan: make(chan *pb.DataPlaneResponse, 1), + sendChan: make(chan *pb.ManagementPlaneRequest, 1), + } +} + +func (m *mockSubscribeServer) Send(msg *pb.ManagementPlaneRequest) error { + m.sendChan <- msg + return nil +} + +func (m *mockSubscribeServer) Recv() (*pb.DataPlaneResponse, error) { + req, ok := <-m.recvChan + if !ok { + return nil, io.EOF + } + return req, nil +} + +func (m *mockSubscribeServer) Context() context.Context { + return m.ctx +} + +func createFakeK8sClient(initObjs ...runtime.Object) (client.Client, error) { + fakeClient := fake.NewFakeClient(initObjs...) 
+ if err := fake.AddIndex(fakeClient, &v1.Pod{}, "metadata.name", func(obj client.Object) []string { + return []string{obj.GetName()} + }); err != nil { + return nil, err + } + + return fakeClient, nil +} + +func createGrpcContext() context.Context { + return grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) +} + +func createGrpcContextWithCancel() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + return grpcContext.NewGrpcContext(ctx, grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }), cancel +} + +func TestCreateConnection(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + request *pb.CreateConnectionRequest + response *pb.CreateConnectionResponse + ctx context.Context + errString string + }{ + { + name: "successfully tracks a connection", + ctx: createGrpcContext(), + request: &pb.CreateConnectionRequest{ + Resource: &pb.Resource{ + Info: &pb.Resource_ContainerInfo{ + ContainerInfo: &pb.ContainerInfo{ + Hostname: "nginx-pod", + }, + }, + Instances: []*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX, + }, + }, + }, + }, + }, + response: &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + }, + }, + { + name: "request is nil", + request: nil, + response: nil, + errString: "empty connection request", + }, + { + name: "context is missing data", + ctx: context.Background(), + request: &pb.CreateConnectionRequest{}, + response: nil, + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "error getting pod owner", + ctx: createGrpcContext(), + request: &pb.CreateConnectionRequest{ + Resource: &pb.Resource{ + Info: &pb.Resource_ContainerInfo{ + ContainerInfo: &pb.ContainerInfo{ + Hostname: "nginx-pod", + }, + }, + }, + }, + response: &pb.CreateConnectionResponse{ + Response: 
&pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_ERROR, + Message: "error getting pod owner", + Error: "no pods found with name \"nginx-pod\"", + }, + }, + errString: "error getting pod owner", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + var objs []runtime.Object + if test.errString == "" { + pod := &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + } + + replicaSet := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + }, + }, + } + + objs = []runtime.Object{pod, replicaSet} + } + + fakeClient, err := createFakeK8sClient(objs...) 
+ g.Expect(err).ToNot(HaveOccurred()) + + cs := newCommandService( + logr.Discard(), + fakeClient, + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + nil, + ) + + resp, err := cs.CreateConnection(test.ctx, test.request) + g.Expect(resp).To(Equal(test.response)) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(connTracker.TrackCallCount()).To(Equal(1)) + + expConn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + + key, conn := connTracker.TrackArgsForCall(0) + g.Expect(key).To(Equal("127.0.0.1")) + g.Expect(conn).To(Equal(expConn)) + }) + } +} + +func ensureFileWasSent( + g *WithT, + server *mockSubscribeServer, + expFile *pb.File, +) { + var req *pb.ManagementPlaneRequest + g.Eventually(func() *pb.ManagementPlaneRequest { + req = <-server.sendChan + return req + }).ShouldNot(BeNil()) + + g.Expect(req.GetConfigApplyRequest()).ToNot(BeNil()) + overview := req.GetConfigApplyRequest().GetOverview() + g.Expect(overview).ToNot(BeNil()) + g.Expect(overview.Files).To(ContainElement(expFile)) +} + +func ensureAPIRequestWasSent( + g *WithT, + server *mockSubscribeServer, + expAction *pb.NGINXPlusAction, +) { + var req *pb.ManagementPlaneRequest + g.Eventually(func() *pb.ManagementPlaneRequest { + req = <-server.sendChan + return req + }).ShouldNot(BeNil()) + + g.Expect(req.GetActionRequest()).ToNot(BeNil()) + action := req.GetActionRequest().GetNginxPlusAction() + g.Expect(action).To(Equal(expAction)) +} + +func verifyResponse( + g *WithT, + server *mockSubscribeServer, + responseCh chan struct{}, +) { + server.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + g.Eventually(func() struct{} { + return <-responseCh + 
}).Should(Equal(struct{}{})) +} + +func TestSubscribe(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + connTracker.GetConnectionReturns(conn) + + store := NewDeploymentStore(&connTracker) + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + store, + &connTracker, + status.NewQueue(), + nil, + ) + + broadcaster := &broadcastfakes.FakeBroadcaster{} + responseCh := make(chan struct{}) + listenCh := make(chan broadcast.NginxAgentMessage, 2) + subChannels := broadcast.SubscriberChannels{ + ListenCh: listenCh, + ResponseCh: responseCh, + } + broadcaster.SubscribeReturns(subChannels) + + // set the initial files and actions to be applied by the Subscription + deployment := store.StoreWithBroadcaster(conn.Parent, broadcaster) + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + Contents: []byte("file contents"), + }, + } + deployment.SetFiles(files) + + initialAction := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + } + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{initialAction}) + + ctx, cancel := createGrpcContextWithCancel() + defer cancel() + + mockServer := newMockSubscribeServer(ctx) + + // put the requests on the listenCh for the Subscription loop to pick up + loopFile := &pb.File{ + FileMeta: &pb.FileMeta{ + Name: "some-other.conf", + Hash: "56789", + }, + } + listenCh <- broadcast.NginxAgentMessage{ + Type: broadcast.ConfigApplyRequest, + FileOverviews: []*pb.File{loopFile}, + } + + loopAction := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateStreamServers{}, + } + listenCh <- broadcast.NginxAgentMessage{ + Type: broadcast.APIRequest, + NGINXPlusAction: loopAction, + } + + // start the Subscriber + errCh := make(chan error) + go func() 
{ + errCh <- cs.Subscribe(mockServer) + }() + + // ensure that the initial config file was sent when the Subscription connected + expFile := &pb.File{ + FileMeta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + } + ensureFileWasSent(g, mockServer, expFile) + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + // ensure that the initial API request was sent when the Subscription connected + ensureAPIRequestWasSent(g, mockServer, initialAction) + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + g.Eventually(func() string { + obj := cs.statusQueue.Dequeue(ctx) + return obj.Deployment.Name + }).Should(Equal("nginx-deployment")) + + // ensure the second file was sent in the loop + ensureFileWasSent(g, mockServer, loopFile) + verifyResponse(g, mockServer, responseCh) + + // ensure the second action was sent in the loop + ensureAPIRequestWasSent(g, mockServer, loopAction) + verifyResponse(g, mockServer, responseCh) + + g.Eventually(func() map[string]error { + return deployment.podStatuses + }).Should(HaveKey("nginx-pod")) + + cancel() + + g.Eventually(func() error { + return <-errCh + }).Should(MatchError(ContainSubstring("context canceled"))) + + g.Expect(deployment.podStatuses).ToNot(HaveKey("nginx-pod")) +} + +func TestSubscribe_Reset(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + connTracker.GetConnectionReturns(conn) + + store := NewDeploymentStore(&connTracker) + resetChan := make(chan struct{}) + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + store, + &connTracker, + status.NewQueue(), + resetChan, + ) + + broadcaster 
:= &broadcastfakes.FakeBroadcaster{} + responseCh := make(chan struct{}) + listenCh := make(chan broadcast.NginxAgentMessage, 2) + subChannels := broadcast.SubscriberChannels{ + ListenCh: listenCh, + ResponseCh: responseCh, + } + broadcaster.SubscribeReturns(subChannels) + + // set the initial files to be applied by the Subscription + deployment := store.StoreWithBroadcaster(conn.Parent, broadcaster) + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + Contents: []byte("file contents"), + }, + } + deployment.SetFiles(files) + + ctx, cancel := createGrpcContextWithCancel() + defer cancel() + + mockServer := newMockSubscribeServer(ctx) + + // start the Subscriber + errCh := make(chan error) + go func() { + errCh <- cs.Subscribe(mockServer) + }() + + // ensure initial config is read to unblock read channel + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + resetChan <- struct{}{} + + g.Eventually(func() error { + err := <-errCh + g.Expect(err).To(HaveOccurred()) + return err + }).Should(MatchError(ContainSubstring("TLS files updated"))) +} + +func TestSubscribe_Errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func( + cs *commandService, + ct *agentgrpcfakes.FakeConnectionsTracker, + ) + ctx context.Context + errString string + }{ + { + name: "context is missing data", + ctx: context.Background(), + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "error waiting for connection; not connected", + setup: func( + cs *commandService, + _ *agentgrpcfakes.FakeConnectionsTracker, + ) { + cs.connectionTimeout = 1100 * time.Millisecond + }, + errString: "timed out waiting for agent to register nginx", + }, + { + name: "error waiting for connection; deployment not tracked", + setup: func( + cs *commandService, + ct *agentgrpcfakes.FakeConnectionsTracker, + ) { + 
ct.GetConnectionReturns(agentgrpc.Connection{InstanceID: "nginx-id"}) + cs.connectionTimeout = 1100 * time.Millisecond + }, + errString: "timed out waiting for nginx deployment to be added to store", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + nil, + ) + + if test.setup != nil { + test.setup(cs, &connTracker) + } + + var ctx context.Context + var cancel context.CancelFunc + + if test.ctx != nil { + ctx = test.ctx + } else { + ctx, cancel = createGrpcContextWithCancel() + defer cancel() + } + + mockServer := newMockSubscribeServer(ctx) + + // start the Subscriber + errCh := make(chan error) + go func() { + errCh <- cs.Subscribe(mockServer) + }() + + g.Eventually(func() error { + err := <-errCh + g.Expect(err).To(HaveOccurred()) + return err + }).Should(MatchError(ContainSubstring(test.errString))) + }) + } +} + +func TestSetInitialConfig_Errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) + errString string + }{ + { + name: "error sending initial config", + setup: func(msgr *messengerfakes.FakeMessenger, _ *Deployment) { + msgr.SendReturns(errors.New("send error")) + }, + errString: "send error", + }, + { + name: "error waiting for initial config apply", + setup: func(msgr *messengerfakes.FakeMessenger, _ *Deployment) { + errCh := make(chan error, 1) + msgr.ErrorsReturns(errCh) + errCh <- errors.New("apply error") + }, + errString: "apply error", + }, + { + name: "error sending initial API request", + setup: func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) { + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + 
}) + msgCh := make(chan *pb.DataPlaneResponse, 1) + msgr.MessagesReturns(msgCh) + msgCh <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + msgr.SendReturnsOnCall(1, errors.New("api send error")) + }, + errString: "api send error", + }, + { + name: "error waiting for initial API request apply", + setup: func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) { + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + }) + msgCh := make(chan *pb.DataPlaneResponse, 1) + msgr.MessagesReturns(msgCh) + msgCh <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + errCh := make(chan error, 1) + msgr.ErrorsReturns(errCh) + errCh <- errors.New("api apply error") + }, + errString: "api apply error", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + msgr := &messengerfakes.FakeMessenger{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + nil, + ) + + conn := &agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + if test.setup != nil { + test.setup(msgr, deployment) + } + + err := cs.setInitialConfig(context.Background(), deployment, conn, msgr) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + }) + } +} + +func TestGetPodOwner(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + podName string + podList *v1.PodList + replicaSet *appsv1.ReplicaSet + errString string + expected types.NamespacedName + }{ + 
{ + name: "successfully gets pod owner", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + }, + }, + }, + expected: types.NamespacedName{ + Namespace: "test", + Name: "nginx-deployment", + }, + }, + { + name: "error listing pods", + podName: "nginx-pod", + podList: &v1.PodList{}, + replicaSet: &appsv1.ReplicaSet{}, + errString: "no pods found", + }, + { + name: "multiple pods with same name", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "nginx-pod"}}, + {ObjectMeta: metav1.ObjectMeta{Namespace: "test2", Name: "nginx-pod"}}, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "should only be one pod with name", + }, + { + name: "pod owner reference is not ReplicaSet", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Owner", + Name: "nginx-owner", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "expected pod owner reference to be ReplicaSet", + }, + { + name: "pod has multiple owners", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + { + Kind: "ReplicaSet", + Name: "nginx-replicaset2", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "expected one owner reference of the nginx 
Pod", + }, + { + name: "replicaSet has multiple owners", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + { + Kind: "Deployment", + Name: "nginx-deployment2", + }, + }, + }, + }, + errString: "expected one owner reference of the nginx ReplicaSet", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeClient, err := createFakeK8sClient(test.podList, test.replicaSet) + g.Expect(err).ToNot(HaveOccurred()) + + cs := newCommandService( + logr.Discard(), + fakeClient, + NewDeploymentStore(nil), + nil, + status.NewQueue(), + nil, + ) + + owner, err := cs.getPodOwner(test.podName) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + g.Expect(owner).To(Equal(types.NamespacedName{})) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(owner).To(Equal(test.expected)) + }) + } +} + +func TestUpdateDataPlaneStatus(t *testing.T) { + t.Parallel() + + tests := []struct { + request *pb.UpdateDataPlaneStatusRequest + response *pb.UpdateDataPlaneStatusResponse + ctx context.Context + errString string + expID string + name string + }{ + { + name: "successfully sets the status", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{ + Resource: &pb.Resource{ + Instances: []*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX, + }, + }, + }, + }, + }, + expID: "nginx-id", + response: &pb.UpdateDataPlaneStatusResponse{}, + }, + { + name: 
"successfully sets the status using plus", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{ + Resource: &pb.Resource{ + Instances: []*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-plus-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS, + }, + }, + }, + }, + }, + expID: "nginx-plus-id", + response: &pb.UpdateDataPlaneStatusResponse{}, + }, + { + name: "request is nil", + request: nil, + response: nil, + errString: "empty UpdateDataPlaneStatus request", + }, + { + name: "context is missing data", + ctx: context.Background(), + request: &pb.UpdateDataPlaneStatusRequest{}, + response: nil, + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "request does not contain ID", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{}, + response: nil, + errString: "request does not contain nginx instanceID", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + nil, + ) + + resp, err := cs.UpdateDataPlaneStatus(test.ctx, test.request) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + g.Expect(resp).To(BeNil()) + + g.Expect(connTracker.SetInstanceIDCallCount()).To(Equal(0)) + + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(test.response)) + + g.Expect(connTracker.SetInstanceIDCallCount()).To(Equal(1)) + + key, id := connTracker.SetInstanceIDArgsForCall(0) + g.Expect(key).To(Equal("127.0.0.1")) + g.Expect(id).To(Equal(test.expID)) + }) + } +} + +func TestUpdateDataPlaneHealth(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := 
newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + nil, + ) + + resp, err := cs.UpdateDataPlaneHealth(context.Background(), &pb.UpdateDataPlaneHealthRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateDataPlaneHealthResponse{})) +} diff --git a/internal/mode/static/nginx/agent/deployment.go b/internal/mode/static/nginx/agent/deployment.go new file mode 100644 index 0000000000..3aa8d80b6b --- /dev/null +++ b/internal/mode/static/nginx/agent/deployment.go @@ -0,0 +1,289 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "sync" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" + "k8s.io/apimachinery/pkg/types" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" +) + +// ignoreFiles is a list of static or base files that live in the +// nginx container that should not be touched by the agent. Any files +// that we add directly into the container should be added here. +var ignoreFiles = []string{ + "/etc/nginx/nginx.conf", + "/etc/nginx/mime.types", + "/etc/nginx/grpc-error-locations.conf", + "/etc/nginx/grpc-error-pages.conf", + "/usr/share/nginx/html/50x.html", + "/usr/share/nginx/html/dashboard.html", + "/usr/share/nginx/html/index.html", + "/usr/share/nginx/html/nginx-modules-reference.pdf", +} + +const fileMode = "0644" + +// Deployment represents an nginx Deployment. It contains its own nginx configuration files, +// a broadcaster for sending those files to all of its pods that are subscribed, and errors +// that may have occurred while applying configuration. +type Deployment struct { + // podStatuses is a map of all Pods for this Deployment and the most recent error + // (or nil if successful) that occurred on a config call to the nginx agent. 
+ podStatuses map[string]error + + broadcaster broadcast.Broadcaster + + configVersion string + // error that is set if a ConfigApply call failed for a Pod. This is needed + // because if subsequent upstream API calls are made within the same update event, + // and are successful, the previous error would be lost in the podStatuses map. + // It's used to preserve the error for when we write status after fully updating nginx. + latestConfigError error + // error that is set when at least one upstream API call failed for a Pod. + // This is needed because subsequent API calls within the same update event could succeed, + // and therefore the previous error would be lost in the podStatuses map. It's used to preserve + // the error for when we write status after fully updating nginx. + latestUpstreamError error + + nginxPlusActions []*pb.NGINXPlusAction + fileOverviews []*pb.File + files []File + + FileLock sync.RWMutex + errLock sync.RWMutex +} + +// newDeployment returns a new Deployment object. +func newDeployment(broadcaster broadcast.Broadcaster) *Deployment { + return &Deployment{ + broadcaster: broadcaster, + podStatuses: make(map[string]error), + } +} + +// GetBroadcaster returns the deployment's broadcaster. +func (d *Deployment) GetBroadcaster() broadcast.Broadcaster { + return d.broadcaster +} + +// SetLatestConfigError sets the latest config apply error for the deployment. +func (d *Deployment) SetLatestConfigError(err error) { + d.errLock.Lock() + defer d.errLock.Unlock() + + d.latestConfigError = err +} + +// SetLatestUpstreamError sets the latest upstream update error for the deployment. +func (d *Deployment) SetLatestUpstreamError(err error) { + d.errLock.Lock() + defer d.errLock.Unlock() + + d.latestUpstreamError = err +} + +// GetLatestConfigError gets the latest config apply error for the deployment. 
+func (d *Deployment) GetLatestConfigError() error { + d.errLock.RLock() + defer d.errLock.RUnlock() + + return d.latestConfigError +} + +// GetLatestUpstreamError gets the latest upstream update error for the deployment. +func (d *Deployment) GetLatestUpstreamError() error { + d.errLock.RLock() + defer d.errLock.RUnlock() + + return d.latestUpstreamError +} + +// SetPodErrorStatus sets the error status of a Pod in this Deployment if applying the config failed. +func (d *Deployment) SetPodErrorStatus(pod string, err error) { + d.errLock.Lock() + defer d.errLock.Unlock() + + d.podStatuses[pod] = err +} + +// RemovePodStatus deletes a pod from the pod status map. +func (d *Deployment) RemovePodStatus(podName string) { + d.errLock.Lock() + defer d.errLock.Unlock() + + delete(d.podStatuses, podName) +} + +// GetConfigurationStatus returns the current config status for this Deployment. It combines +// the most recent errors (if they exist) for all Pods in the Deployment into a single error. +func (d *Deployment) GetConfigurationStatus() error { + d.errLock.RLock() + defer d.errLock.RUnlock() + + errs := make([]error, 0, len(d.podStatuses)) + for _, err := range d.podStatuses { + errs = append(errs, err) + } + + if len(errs) == 1 { + return errs[0] + } + + return errors.Join(errs...) +} + +/* +The following functions for the Deployment object are UNLOCKED, meaning that they are unsafe. +Callers of these functions MUST ensure the FileLock is set before calling. + +These functions are called as part of the ConfigApply or APIRequest processes. These entire processes +are locked by the caller, hence why the functions themselves do not set the locks. +*/ + +// GetFileOverviews returns the current list of fileOverviews and configVersion for the deployment. +// The deployment FileLock MUST already be locked before calling this function. 
+func (d *Deployment) GetFileOverviews() ([]*pb.File, string) { + return d.fileOverviews, d.configVersion +} + +// GetNGINXPlusActions returns the current NGINX Plus API Actions for the deployment. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) GetNGINXPlusActions() []*pb.NGINXPlusAction { + return d.nginxPlusActions +} + +// GetFile gets the requested file for the deployment and returns its contents. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) GetFile(name, hash string) []byte { + for _, file := range d.files { + if name == file.Meta.GetName() && hash == file.Meta.GetHash() { + return file.Contents + } + } + + return nil +} + +// SetFiles updates the nginx files and fileOverviews for the deployment and returns the message to send. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) SetFiles(files []File) *broadcast.NginxAgentMessage { + d.files = files + + fileOverviews := make([]*pb.File, 0, len(files)) + for _, file := range files { + fileOverviews = append(fileOverviews, &pb.File{FileMeta: file.Meta}) + } + + // add ignored files to the overview as 'unmanaged' so agent doesn't touch them + for _, f := range ignoreFiles { + meta := &pb.FileMeta{ + Name: f, + Permissions: fileMode, + } + + fileOverviews = append(fileOverviews, &pb.File{ + FileMeta: meta, + Unmanaged: true, + }) + } + + newConfigVersion := filesHelper.GenerateConfigVersion(fileOverviews) + if d.configVersion == newConfigVersion { + // files have not changed, nothing to send + return nil + } + + d.configVersion = newConfigVersion + d.fileOverviews = fileOverviews + + return &broadcast.NginxAgentMessage{ + Type: broadcast.ConfigApplyRequest, + FileOverviews: fileOverviews, + ConfigVersion: d.configVersion, + } +} + +// SetNGINXPlusActions updates the deployment's latest NGINX Plus Actions to perform if using NGINX Plus. 
+// Used by a Subscriber when it first connects. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) SetNGINXPlusActions(actions []*pb.NGINXPlusAction) { + d.nginxPlusActions = actions +} + +//counterfeiter:generate . DeploymentStorer + +// DeploymentStorer is an interface to store Deployments. +type DeploymentStorer interface { + Get(types.NamespacedName) *Deployment + GetOrStore(context.Context, types.NamespacedName, chan struct{}) *Deployment + Remove(types.NamespacedName) +} + +// DeploymentStore holds a map of all Deployments. +type DeploymentStore struct { + connTracker agentgrpc.ConnectionsTracker + deployments sync.Map +} + +// NewDeploymentStore returns a new instance of a DeploymentStore. +func NewDeploymentStore(connTracker agentgrpc.ConnectionsTracker) *DeploymentStore { + return &DeploymentStore{ + connTracker: connTracker, + } +} + +// Get returns the desired deployment from the store. +func (d *DeploymentStore) Get(nsName types.NamespacedName) *Deployment { + val, ok := d.deployments.Load(nsName) + if !ok { + return nil + } + + deployment, ok := val.(*Deployment) + if !ok { + panic(fmt.Sprintf("expected Deployment, got type %T", val)) + } + + return deployment +} + +// GetOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +func (d *DeploymentStore) GetOrStore( + ctx context.Context, + nsName types.NamespacedName, + stopCh chan struct{}, +) *Deployment { + if deployment := d.Get(nsName); deployment != nil { + return deployment + } + + deployment := newDeployment(broadcast.NewDeploymentBroadcaster(ctx, stopCh)) + d.deployments.Store(nsName, deployment) + + return deployment +} + +// StoreWithBroadcaster creates a new Deployment with the supplied broadcaster and stores it. +// Used in unit tests to provide a mock broadcaster. 
+func (d *DeploymentStore) StoreWithBroadcaster( + nsName types.NamespacedName, + broadcaster broadcast.Broadcaster, +) *Deployment { + deployment := newDeployment(broadcaster) + d.deployments.Store(nsName, deployment) + + return deployment +} + +// Remove the deployment from the store. +func (d *DeploymentStore) Remove(nsName types.NamespacedName) { + d.deployments.Delete(nsName) +} diff --git a/internal/mode/static/nginx/agent/deployment_test.go b/internal/mode/static/nginx/agent/deployment_test.go new file mode 100644 index 0000000000..57d9510588 --- /dev/null +++ b/internal/mode/static/nginx/agent/deployment_test.go @@ -0,0 +1,147 @@ +package agent + +import ( + "context" + "errors" + "testing" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" +) + +func TestNewDeployment(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + g.Expect(deployment).ToNot(BeNil()) + + g.Expect(deployment.GetBroadcaster()).ToNot(BeNil()) + g.Expect(deployment.GetFileOverviews()).To(BeEmpty()) + g.Expect(deployment.GetNGINXPlusActions()).To(BeEmpty()) + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) +} + +func TestSetAndGetFiles(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + }, + } + + msg := deployment.SetFiles(files) + fileOverviews, configVersion := deployment.GetFileOverviews() + + 
g.Expect(msg.Type).To(Equal(broadcast.ConfigApplyRequest)) + g.Expect(msg.ConfigVersion).To(Equal(configVersion)) + g.Expect(msg.FileOverviews).To(HaveLen(9)) // 1 file + 8 ignored files + g.Expect(fileOverviews).To(Equal(msg.FileOverviews)) + + file := deployment.GetFile("test.conf", "12345") + g.Expect(file).To(Equal([]byte("test content"))) + + g.Expect(deployment.GetFile("invalid", "12345")).To(BeNil()) + g.Expect(deployment.GetFile("test.conf", "invalid")).To(BeNil()) + + // Set the same files again + msg = deployment.SetFiles(files) + g.Expect(msg).To(BeNil()) + + newFileOverviews, _ := deployment.GetFileOverviews() + g.Expect(newFileOverviews).To(Equal(fileOverviews)) +} + +func TestSetNGINXPlusActions(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + actions := []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{}, + }, + } + + deployment.SetNGINXPlusActions(actions) + g.Expect(deployment.GetNGINXPlusActions()).To(Equal(actions)) +} + +func TestSetPodErrorStatus(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + err2 := errors.New("test error 2") + deployment.SetPodErrorStatus("test-pod", err) + deployment.SetPodErrorStatus("test-pod2", err2) + + g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error"))) + g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error 2"))) + + deployment.RemovePodStatus("test-pod") + g.Expect(deployment.podStatuses).ToNot(HaveKey("test-pod")) +} + +func TestSetLatestConfigError(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + deployment.SetLatestConfigError(err) + 
g.Expect(deployment.GetLatestConfigError()).To(MatchError(err)) +} + +func TestSetLatestUpstreamError(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + deployment.SetLatestUpstreamError(err) + g.Expect(deployment.GetLatestUpstreamError()).To(MatchError(err)) +} + +func TestDeploymentStore(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := NewDeploymentStore(&agentgrpcfakes.FakeConnectionsTracker{}) + + nsName := types.NamespacedName{Namespace: "default", Name: "test-deployment"} + + deployment := store.GetOrStore(context.Background(), nsName, nil) + g.Expect(deployment).ToNot(BeNil()) + + fetchedDeployment := store.Get(nsName) + g.Expect(fetchedDeployment).To(Equal(deployment)) + + deployment = store.GetOrStore(context.Background(), nsName, nil) + g.Expect(fetchedDeployment).To(Equal(deployment)) + + store.Remove(nsName) + g.Expect(store.Get(nsName)).To(BeNil()) +} diff --git a/internal/mode/static/nginx/agent/doc.go b/internal/mode/static/nginx/agent/doc.go new file mode 100644 index 0000000000..8ffe4381f3 --- /dev/null +++ b/internal/mode/static/nginx/agent/doc.go @@ -0,0 +1,4 @@ +/* +Package agent contains the functions for sending nginx configuration to the agent. 
+*/ +package agent diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go new file mode 100644 index 0000000000..fa604bc16b --- /dev/null +++ b/internal/mode/static/nginx/agent/file.go @@ -0,0 +1,111 @@ +package agent + +import ( + "context" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" +) + +// File is an nginx configuration file that the nginx agent gets from the control plane +// after a ConfigApplyRequest. +type File struct { + Meta *pb.FileMeta + Contents []byte +} + +// fileService handles file management between the control plane and the agent. +type fileService struct { + pb.FileServiceServer + nginxDeployments *DeploymentStore + connTracker agentgrpc.ConnectionsTracker + logger logr.Logger +} + +func newFileService( + logger logr.Logger, + depStore *DeploymentStore, + connTracker agentgrpc.ConnectionsTracker, +) *fileService { + return &fileService{ + logger: logger, + nginxDeployments: depStore, + connTracker: connTracker, + } +} + +func (fs *fileService) Register(server *grpc.Server) { + pb.RegisterFileServiceServer(server, fs) +} + +// GetFile is called by the agent when it needs to download a file for a ConfigApplyRequest. +// The deployment object used to get the files is already LOCKED when this function is called, +// before the ConfigApply transaction is started. 
+func (fs *fileService) GetFile( + ctx context.Context, + req *pb.GetFileRequest, +) (*pb.GetFileResponse, error) { + filename := req.GetFileMeta().GetName() + hash := req.GetFileMeta().GetHash() + + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + conn := fs.connTracker.GetConnection(gi.IPAddress) + if conn.PodName == "" { + return nil, status.Errorf(codes.NotFound, "connection not found") + } + + deployment := fs.nginxDeployments.Get(conn.Parent) + if deployment == nil { + return nil, status.Errorf(codes.NotFound, "deployment not found in store") + } + + contents := deployment.GetFile(filename, hash) + if len(contents) == 0 { + return nil, status.Errorf(codes.NotFound, "file not found") + } + + fs.logger.V(1).Info("Getting file for agent", "file", filename) + + return &pb.GetFileResponse{ + Contents: &pb.FileContents{ + Contents: contents, + }, + }, nil +} + +// GetOverview gets the overview of files for a particular configuration version of an instance. +// At the moment it doesn't appear to be used by the agent. +func (fs *fileService) GetOverview( + _ context.Context, + _ *pb.GetOverviewRequest, +) (*pb.GetOverviewResponse, error) { + return &pb.GetOverviewResponse{}, nil +} + +// UpdateOverview is called by agent on startup and whenever any files change on the instance. +// Since directly changing nginx configuration on the instance is not supported, this is a no-op for NGF. +func (fs *fileService) UpdateOverview( + _ context.Context, + _ *pb.UpdateOverviewRequest, +) (*pb.UpdateOverviewResponse, error) { + return &pb.UpdateOverviewResponse{}, nil +} + +// UpdateFile is called by agent whenever any files change on the instance. +// Since directly changing nginx configuration on the instance is not supported, this is a no-op for NGF. 
+func (fs *fileService) UpdateFile( + _ context.Context, + _ *pb.UpdateFileRequest, +) (*pb.UpdateFileResponse, error) { + return &pb.UpdateFileResponse{}, nil +} diff --git a/internal/mode/static/nginx/agent/file_test.go b/internal/mode/static/nginx/agent/file_test.go new file mode 100644 index 0000000000..1e683eb214 --- /dev/null +++ b/internal/mode/static/nginx/agent/file_test.go @@ -0,0 +1,209 @@ +package agent + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/apimachinery/pkg/types" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" +) + +func TestGetFile(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + depStore := NewDeploymentStore(connTracker) + dep := depStore.GetOrStore(context.Background(), deploymentName, nil) + + fileMeta := &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + } + contents := []byte("test contents") + + dep.files = []File{ + { + Meta: fileMeta, + Contents: contents, + }, + } + + fs := newFileService(logr.Discard(), depStore, connTracker) + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + req := &pb.GetFileRequest{ + FileMeta: fileMeta, + } + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).ToNot(BeNil()) + 
g.Expect(resp.GetContents()).ToNot(BeNil()) + g.Expect(resp.GetContents().GetContents()).To(Equal(contents)) +} + +func TestGetFile_InvalidConnection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + resp, err := fs.GetFile(context.Background(), req) + + g.Expect(err).To(Equal(agentgrpc.ErrStatusInvalidConnection)) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_ConnectionNotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, &agentgrpcfakes.FakeConnectionsTracker{}) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "connection not found"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_DeploymentNotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + fs := newFileService(logr.Discard(), NewDeploymentStore(connTracker), connTracker) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "deployment not found in store"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_FileNotFound(t *testing.T) { + t.Parallel() + g := 
NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + depStore := NewDeploymentStore(connTracker) + depStore.GetOrStore(context.Background(), deploymentName, nil) + + fs := newFileService(logr.Discard(), depStore, connTracker) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "file not found"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetOverview(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.GetOverview(context.Background(), &pb.GetOverviewRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.GetOverviewResponse{})) +} + +func TestUpdateOverview(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.UpdateOverview(context.Background(), &pb.UpdateOverviewRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateOverviewResponse{})) +} + +func TestUpdateFile(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.UpdateFile(context.Background(), &pb.UpdateFileRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateFileResponse{})) +} diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go new file mode 100644 index 0000000000..0bae634ccc --- /dev/null +++ 
b/internal/mode/static/nginx/agent/grpc/connections.go @@ -0,0 +1,83 @@ +package grpc + +import ( + "sync" + + "k8s.io/apimachinery/pkg/types" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . ConnectionsTracker + +// ConnectionsTracker defines an interface to track all connections between the control plane +// and nginx agents. +type ConnectionsTracker interface { + Track(key string, conn Connection) + GetConnection(key string) Connection + SetInstanceID(key, id string) + RemoveConnection(key string) +} + +// Connection contains the data about a single nginx agent connection. +type Connection struct { + PodName string + InstanceID string + Parent types.NamespacedName +} + +// Ready returns if the connection is ready to be used. In other words, agent +// has registered itself and an nginx instance with the control plane. +func (c *Connection) Ready() bool { + return c.InstanceID != "" +} + +// AgentConnectionsTracker keeps track of all connections between the control plane and nginx agents. +type AgentConnectionsTracker struct { + // connections contains a map of all IP addresses that have connected and their connection info. + connections map[string]Connection + + lock sync.RWMutex +} + +// NewConnectionsTracker returns a new AgentConnectionsTracker instance. +func NewConnectionsTracker() ConnectionsTracker { + return &AgentConnectionsTracker{ + connections: make(map[string]Connection), + } +} + +// Track adds a connection to the tracking map. +func (c *AgentConnectionsTracker) Track(key string, conn Connection) { + c.lock.Lock() + defer c.lock.Unlock() + + c.connections[key] = conn +} + +// GetConnection returns the requested connection. +func (c *AgentConnectionsTracker) GetConnection(key string) Connection { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.connections[key] +} + +// SetInstanceID sets the nginx instanceID for a connection. 
+func (c *AgentConnectionsTracker) SetInstanceID(key, id string) { + c.lock.Lock() + defer c.lock.Unlock() + + if conn, ok := c.connections[key]; ok { + conn.InstanceID = id + c.connections[key] = conn + } +} + +// RemoveConnection removes a connection from the tracking map. +func (c *AgentConnectionsTracker) RemoveConnection(key string) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.connections, key) +} diff --git a/internal/mode/static/nginx/agent/grpc/connections_test.go b/internal/mode/static/nginx/agent/grpc/connections_test.go new file mode 100644 index 0000000000..c9d7b3cdc3 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/connections_test.go @@ -0,0 +1,95 @@ +package grpc_test + +import ( + "testing" + + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" +) + +func TestGetConnection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) + + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn).To(Equal(conn)) + + nonExistent := tracker.GetConnection("nonexistent") + g.Expect(nonExistent).To(Equal(agentgrpc.Connection{})) +} + +func TestConnectionIsReady(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + + g.Expect(conn.Ready()).To(BeTrue()) +} + +func TestConnectionIsNotReady(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conn := agentgrpc.Connection{ + PodName: "pod1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + + g.Expect(conn.Ready()).To(BeFalse()) +} + +func TestSetInstanceID(t *testing.T) { + 
t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + conn := agentgrpc.Connection{ + PodName: "pod1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) + + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn.Ready()).To(BeFalse()) + + tracker.SetInstanceID("key1", "instance1") + + trackedConn = tracker.GetConnection("key1") + g.Expect(trackedConn.Ready()).To(BeTrue()) + g.Expect(trackedConn.InstanceID).To(Equal("instance1")) +} + +func TestRemoveConnection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) + + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn).To(Equal(conn)) + + tracker.RemoveConnection("key1") + g.Expect(tracker.GetConnection("key1")).To(Equal(agentgrpc.Connection{})) +} diff --git a/internal/mode/static/nginx/agent/grpc/context/context.go b/internal/mode/static/nginx/agent/grpc/context/context.go new file mode 100644 index 0000000000..a3bb0d3642 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/context.go @@ -0,0 +1,25 @@ +package context + +import ( + "context" +) + +// GrpcInfo for storing identity information for the gRPC client. +type GrpcInfo struct { + Token string `json:"token"` // auth token that was provided by the gRPC client + IPAddress string `json:"ip_address"` // ip address of the agent +} + +type contextGRPCKey struct{} + +// NewGrpcContext returns a new context.Context that has the provided GrpcInfo attached. +func NewGrpcContext(ctx context.Context, r GrpcInfo) context.Context { + return context.WithValue(ctx, contextGRPCKey{}, r) +} + +// GrpcInfoFromContext returns the GrpcInfo saved in ctx if it exists. +// Returns false if there's no GrpcInfo in the context. 
+func GrpcInfoFromContext(ctx context.Context) (GrpcInfo, bool) { + v, ok := ctx.Value(contextGRPCKey{}).(GrpcInfo) + return v, ok +} diff --git a/internal/mode/static/nginx/agent/grpc/context/context_test.go b/internal/mode/static/nginx/agent/grpc/context/context_test.go new file mode 100644 index 0000000000..57acf9152f --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/context_test.go @@ -0,0 +1,31 @@ +package context_test + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" +) + +func TestGrpcInfoInContext(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + grpcInfo := grpcContext.GrpcInfo{IPAddress: "192.168.1.1"} + + newCtx := grpcContext.NewGrpcContext(context.Background(), grpcInfo) + info, ok := grpcContext.GrpcInfoFromContext(newCtx) + g.Expect(ok).To(BeTrue()) + g.Expect(info).To(Equal(grpcInfo)) +} + +func TestGrpcInfoNotInContext(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + info, ok := grpcContext.GrpcInfoFromContext(context.Background()) + g.Expect(ok).To(BeFalse()) + g.Expect(info).To(Equal(grpcContext.GrpcInfo{})) +} diff --git a/internal/mode/static/nginx/agent/grpc/context/doc.go b/internal/mode/static/nginx/agent/grpc/context/doc.go new file mode 100644 index 0000000000..689a126cf7 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/doc.go @@ -0,0 +1,4 @@ +/* +Package context contains the functions for storing extra information in the gRPC context. +*/ +package context diff --git a/internal/mode/static/nginx/agent/grpc/doc.go b/internal/mode/static/nginx/agent/grpc/doc.go new file mode 100644 index 0000000000..b98f0af8b6 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/doc.go @@ -0,0 +1,4 @@ +/* +Package grpc contains the functionality for the gRPC server for communicating with the nginx agent. 
+*/ +package grpc diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go b/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go new file mode 100644 index 0000000000..cd54f18c44 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go @@ -0,0 +1,4 @@ +/* +Package filewatcher contains the functions to watch for TLS file updates for the gRPC server. +*/ +package filewatcher diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go new file mode 100644 index 0000000000..2d79f4047c --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go @@ -0,0 +1,106 @@ +package filewatcher + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-logr/logr" +) + +const monitoringInterval = 5 * time.Second + +var emptyEvent = fsnotify.Event{ + Name: "", + Op: 0, +} + +// FileWatcher watches for changes to files and notifies the channel when a change occurs. +type FileWatcher struct { + filesChanged *atomic.Bool + watcher *fsnotify.Watcher + notifyCh chan<- struct{} + logger logr.Logger + filesToWatch []string + interval time.Duration +} + +// NewFileWatcher creates a new FileWatcher instance. +func NewFileWatcher(logger logr.Logger, files []string, notifyCh chan<- struct{}) (*FileWatcher, error) { + filesChanged := &atomic.Bool{} + + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, fmt.Errorf("failed to initialize TLS file watcher: %w", err) + } + + return &FileWatcher{ + filesChanged: filesChanged, + watcher: watcher, + logger: logger, + filesToWatch: files, + notifyCh: notifyCh, + interval: monitoringInterval, + }, nil +} + +// Watch starts the watch for file changes. 
+func (w *FileWatcher) Watch(ctx context.Context) { + w.logger.V(1).Info("Starting file watcher") + + ticker := time.NewTicker(w.interval) + for _, file := range w.filesToWatch { + w.addWatcher(file) + } + + for { + select { + case <-ctx.Done(): + if err := w.watcher.Close(); err != nil { + w.logger.Error(err, "unable to close file watcher") + } + return + case event := <-w.watcher.Events: + w.handleEvent(event) + case <-ticker.C: + w.checkForUpdates() + case err := <-w.watcher.Errors: + w.logger.Error(err, "error watching file") + } + } +} + +func (w *FileWatcher) addWatcher(path string) { + if err := w.watcher.Add(path); err != nil { + w.logger.Error(err, "failed to watch file", "file", path) + } +} + +func (w *FileWatcher) handleEvent(event fsnotify.Event) { + if isEventSkippable(event) { + return + } + + if event.Has(fsnotify.Remove) || event.Has(fsnotify.Rename) { + w.addWatcher(event.Name) + } + + w.filesChanged.Store(true) +} + +func (w *FileWatcher) checkForUpdates() { + if w.filesChanged.Load() { + w.logger.Info("TLS files changed, sending notification to reset nginx agent connections") + w.notifyCh <- struct{}{} + w.filesChanged.Store(false) + } +} + +func isEventSkippable(event fsnotify.Event) bool { + return event == emptyEvent || + event.Name == "" || + event.Has(fsnotify.Chmod) || + event.Has(fsnotify.Create) +} diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go new file mode 100644 index 0000000000..1840e78849 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go @@ -0,0 +1,69 @@ +package filewatcher + +import ( + "context" + "os" + "path" + "testing" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" +) + +func TestFileWatcher_Watch(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + notifyCh := make(chan struct{}, 1) + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + file := path.Join(os.TempDir(), "test-file") + _, err := os.Create(file) + g.Expect(err).ToNot(HaveOccurred()) + defer os.Remove(file) + + w, err := NewFileWatcher(logr.Discard(), []string{file}, notifyCh) + g.Expect(err).ToNot(HaveOccurred()) + w.interval = 300 * time.Millisecond + + go w.Watch(ctx) + + w.watcher.Events <- fsnotify.Event{Name: file, Op: fsnotify.Write} + g.Eventually(func() bool { + return w.filesChanged.Load() + }).Should(BeTrue()) + + g.Eventually(notifyCh).Should(Receive()) +} + +func TestFileWatcher_handleEvent(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + w, err := NewFileWatcher(logr.Discard(), []string{"test-file"}, nil) + g.Expect(err).ToNot(HaveOccurred()) + + w.handleEvent(fsnotify.Event{Op: fsnotify.Write}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-chmod", Op: fsnotify.Chmod}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-create", Op: fsnotify.Create}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-write", Op: fsnotify.Write}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) + + w.handleEvent(fsnotify.Event{Name: "test-remove", Op: fsnotify.Remove}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) + + w.handleEvent(fsnotify.Event{Name: "test-rename", Op: fsnotify.Rename}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) +} diff --git a/internal/mode/static/nginx/agent/grpc/grpc.go b/internal/mode/static/nginx/agent/grpc/grpc.go new file mode 100644 index 0000000000..f995756584 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/grpc.go @@ -0,0 +1,154 @@ +package grpc + +import ( + 
"context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "os" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/filewatcher" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/interceptor" +) + +const ( + keepAliveTime = 15 * time.Second + keepAliveTimeout = 10 * time.Second + caCertPath = "/var/run/secrets/ngf/ca.crt" + tlsCertPath = "/var/run/secrets/ngf/tls.crt" + tlsKeyPath = "/var/run/secrets/ngf/tls.key" +) + +var ErrStatusInvalidConnection = status.Error(codes.Unauthenticated, "invalid connection") + +// Interceptor provides hooks to intercept the execution of an RPC on the server. +type Interceptor interface { + Stream(logr.Logger) grpc.StreamServerInterceptor + Unary(logr.Logger) grpc.UnaryServerInterceptor +} + +// Server is a gRPC server for communicating with the nginx agent. +type Server struct { + // Interceptor provides hooks to intercept the execution of an RPC on the server. + interceptor Interceptor + + logger logr.Logger + + // resetConnChan is used by the filewatcher to trigger the Command service to + // reset any connections when TLS files are updated. + resetConnChan chan<- struct{} + // RegisterServices is a list of functions to register gRPC services to the gRPC server. + registerServices []func(*grpc.Server) + // Port is the port that the server is listening on. + // Must be exposed in the control plane deployment/service. 
+ port int +} + +func NewServer( + logger logr.Logger, + port int, + registerSvcs []func(*grpc.Server), + k8sClient client.Client, + tokenAudience string, + resetConnChan chan<- struct{}, +) *Server { + return &Server{ + logger: logger, + port: port, + registerServices: registerSvcs, + interceptor: interceptor.NewContextSetter(k8sClient, tokenAudience), + resetConnChan: resetConnChan, + } +} + +// Start is a runnable that starts the gRPC server for communicating with the nginx agent. +func (g *Server) Start(ctx context.Context) error { + listener, err := net.Listen("tcp", fmt.Sprintf(":%d", g.port)) + if err != nil { + return err + } + + tlsCredentials, err := getTLSConfig() + if err != nil { + return err + } + + server := grpc.NewServer( + grpc.KeepaliveParams( + keepalive.ServerParameters{ + Time: keepAliveTime, + Timeout: keepAliveTimeout, + }, + ), + grpc.KeepaliveEnforcementPolicy( + keepalive.EnforcementPolicy{ + MinTime: keepAliveTime, + PermitWithoutStream: true, + }, + ), + grpc.ChainStreamInterceptor(g.interceptor.Stream(g.logger)), + grpc.ChainUnaryInterceptor(g.interceptor.Unary(g.logger)), + grpc.Creds(tlsCredentials), + ) + + for _, registerSvc := range g.registerServices { + registerSvc(server) + } + + tlsFiles := []string{caCertPath, tlsCertPath, tlsKeyPath} + fileWatcher, err := filewatcher.NewFileWatcher(g.logger.WithName("fileWatcher"), tlsFiles, g.resetConnChan) + if err != nil { + return err + } + + go fileWatcher.Watch(ctx) + + go func() { + <-ctx.Done() + g.logger.Info("Shutting down GRPC Server") + // Since we use a long-lived stream, GracefulStop does not terminate. Therefore we use Stop. 
+ server.Stop() + }() + + return server.Serve(listener) +} + +func getTLSConfig() (credentials.TransportCredentials, error) { + caPem, err := os.ReadFile(caCertPath) + if err != nil { + return nil, err + } + + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(caPem) { + return nil, errors.New("error parsing CA PEM") + } + + getCertificateCallback := func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + serverCert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) + return &serverCert, err + } + + tlsConfig := &tls.Config{ + GetCertificate: getCertificateCallback, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: certPool, + MinVersion: tls.VersionTLS13, + } + + return credentials.NewTLS(tlsConfig), nil +} + +var _ manager.Runnable = &Server{} diff --git a/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go new file mode 100644 index 0000000000..8ae97043cd --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go @@ -0,0 +1,232 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package grpcfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" +) + +type FakeConnectionsTracker struct { + GetConnectionStub func(string) grpc.Connection + getConnectionMutex sync.RWMutex + getConnectionArgsForCall []struct { + arg1 string + } + getConnectionReturns struct { + result1 grpc.Connection + } + getConnectionReturnsOnCall map[int]struct { + result1 grpc.Connection + } + RemoveConnectionStub func(string) + removeConnectionMutex sync.RWMutex + removeConnectionArgsForCall []struct { + arg1 string + } + SetInstanceIDStub func(string, string) + setInstanceIDMutex sync.RWMutex + setInstanceIDArgsForCall []struct { + arg1 string + arg2 string + } + TrackStub func(string, grpc.Connection) + trackMutex sync.RWMutex + trackArgsForCall []struct { + arg1 string + arg2 grpc.Connection + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeConnectionsTracker) GetConnection(arg1 string) grpc.Connection { + fake.getConnectionMutex.Lock() + ret, specificReturn := fake.getConnectionReturnsOnCall[len(fake.getConnectionArgsForCall)] + fake.getConnectionArgsForCall = append(fake.getConnectionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetConnectionStub + fakeReturns := fake.getConnectionReturns + fake.recordInvocation("GetConnection", []interface{}{arg1}) + fake.getConnectionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeConnectionsTracker) GetConnectionCallCount() int { + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + return len(fake.getConnectionArgsForCall) +} + +func (fake *FakeConnectionsTracker) GetConnectionCalls(stub func(string) grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = stub +} + +func (fake *FakeConnectionsTracker) 
GetConnectionArgsForCall(i int) string { + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + argsForCall := fake.getConnectionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeConnectionsTracker) GetConnectionReturns(result1 grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = nil + fake.getConnectionReturns = struct { + result1 grpc.Connection + }{result1} +} + +func (fake *FakeConnectionsTracker) GetConnectionReturnsOnCall(i int, result1 grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = nil + if fake.getConnectionReturnsOnCall == nil { + fake.getConnectionReturnsOnCall = make(map[int]struct { + result1 grpc.Connection + }) + } + fake.getConnectionReturnsOnCall[i] = struct { + result1 grpc.Connection + }{result1} +} + +func (fake *FakeConnectionsTracker) RemoveConnection(arg1 string) { + fake.removeConnectionMutex.Lock() + fake.removeConnectionArgsForCall = append(fake.removeConnectionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.RemoveConnectionStub + fake.recordInvocation("RemoveConnection", []interface{}{arg1}) + fake.removeConnectionMutex.Unlock() + if stub != nil { + fake.RemoveConnectionStub(arg1) + } +} + +func (fake *FakeConnectionsTracker) RemoveConnectionCallCount() int { + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() + return len(fake.removeConnectionArgsForCall) +} + +func (fake *FakeConnectionsTracker) RemoveConnectionCalls(stub func(string)) { + fake.removeConnectionMutex.Lock() + defer fake.removeConnectionMutex.Unlock() + fake.RemoveConnectionStub = stub +} + +func (fake *FakeConnectionsTracker) RemoveConnectionArgsForCall(i int) string { + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() + argsForCall := fake.removeConnectionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake 
*FakeConnectionsTracker) SetInstanceID(arg1 string, arg2 string) { + fake.setInstanceIDMutex.Lock() + fake.setInstanceIDArgsForCall = append(fake.setInstanceIDArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.SetInstanceIDStub + fake.recordInvocation("SetInstanceID", []interface{}{arg1, arg2}) + fake.setInstanceIDMutex.Unlock() + if stub != nil { + fake.SetInstanceIDStub(arg1, arg2) + } +} + +func (fake *FakeConnectionsTracker) SetInstanceIDCallCount() int { + fake.setInstanceIDMutex.RLock() + defer fake.setInstanceIDMutex.RUnlock() + return len(fake.setInstanceIDArgsForCall) +} + +func (fake *FakeConnectionsTracker) SetInstanceIDCalls(stub func(string, string)) { + fake.setInstanceIDMutex.Lock() + defer fake.setInstanceIDMutex.Unlock() + fake.SetInstanceIDStub = stub +} + +func (fake *FakeConnectionsTracker) SetInstanceIDArgsForCall(i int) (string, string) { + fake.setInstanceIDMutex.RLock() + defer fake.setInstanceIDMutex.RUnlock() + argsForCall := fake.setInstanceIDArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeConnectionsTracker) Track(arg1 string, arg2 grpc.Connection) { + fake.trackMutex.Lock() + fake.trackArgsForCall = append(fake.trackArgsForCall, struct { + arg1 string + arg2 grpc.Connection + }{arg1, arg2}) + stub := fake.TrackStub + fake.recordInvocation("Track", []interface{}{arg1, arg2}) + fake.trackMutex.Unlock() + if stub != nil { + fake.TrackStub(arg1, arg2) + } +} + +func (fake *FakeConnectionsTracker) TrackCallCount() int { + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + return len(fake.trackArgsForCall) +} + +func (fake *FakeConnectionsTracker) TrackCalls(stub func(string, grpc.Connection)) { + fake.trackMutex.Lock() + defer fake.trackMutex.Unlock() + fake.TrackStub = stub +} + +func (fake *FakeConnectionsTracker) TrackArgsForCall(i int) (string, grpc.Connection) { + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + argsForCall := fake.trackArgsForCall[i] + 
return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeConnectionsTracker) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() + fake.setInstanceIDMutex.RLock() + defer fake.setInstanceIDMutex.RUnlock() + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeConnectionsTracker) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ grpc.ConnectionsTracker = new(FakeConnectionsTracker) diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/doc.go b/internal/mode/static/nginx/agent/grpc/interceptor/doc.go new file mode 100644 index 0000000000..e5175664b9 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/doc.go @@ -0,0 +1,4 @@ +/* +Package interceptor contains the middleware for intercepting an RPC call. 
+*/ +package interceptor diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go new file mode 100644 index 0000000000..87517c5875 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go @@ -0,0 +1,196 @@ +package interceptor + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" +) + +const ( + headerUUID = "uuid" + headerAuth = "authorization" +) + +// streamHandler is a struct that implements StreamHandler, allowing the interceptor to replace the context. 
+type streamHandler struct { + grpc.ServerStream + ctx context.Context +} + +func (sh *streamHandler) Context() context.Context { + return sh.ctx +} + +type ContextSetter struct { + k8sClient client.Client + audience string +} + +func NewContextSetter(k8sClient client.Client, audience string) ContextSetter { + return ContextSetter{ + k8sClient: k8sClient, + audience: audience, + } +} + +func (c ContextSetter) Stream(logger logr.Logger) grpc.StreamServerInterceptor { + return func( + srv any, + ss grpc.ServerStream, + _ *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx, err := c.validateConnection(ss.Context()) + if err != nil { + logger.Error(err, "error validating connection") + return err + } + return handler(srv, &streamHandler{ + ServerStream: ss, + ctx: ctx, + }) + } +} + +func (c ContextSetter) Unary(logger logr.Logger) grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req any, + _ *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (resp any, err error) { + if ctx, err = c.validateConnection(ctx); err != nil { + logger.Error(err, "error validating connection") + return nil, err + } + return handler(ctx, req) + } +} + +// validateConnection checks that the connection is valid and returns a new +// context containing information used by the gRPC command/file services. 
+func (c ContextSetter) validateConnection(ctx context.Context) (context.Context, error) { + gi, err := getGrpcInfo(ctx) + if err != nil { + return nil, err + } + + return c.validateToken(ctx, gi) +} + +func getGrpcInfo(ctx context.Context) (*grpcContext.GrpcInfo, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.InvalidArgument, "no metadata") + } + + id := md.Get(headerUUID) + if len(id) == 0 { + return nil, status.Error(codes.Unauthenticated, "no identity") + } + + auths := md.Get(headerAuth) + if len(auths) == 0 { + return nil, status.Error(codes.Unauthenticated, "no authorization") + } + + p, ok := peer.FromContext(ctx) + if !ok { + return nil, status.Error(codes.InvalidArgument, "no peer data") + } + + addr, ok := p.Addr.(*net.TCPAddr) + if !ok { + panic(fmt.Sprintf("address %q was not of type net.TCPAddr", p.Addr.String())) + } + + return &grpcContext.GrpcInfo{ + Token: auths[0], + IPAddress: addr.IP.String(), + }, nil +} + +func (c ContextSetter) validateToken(ctx context.Context, gi *grpcContext.GrpcInfo) (context.Context, error) { + tokenReview := &authv1.TokenReview{ + Spec: authv1.TokenReviewSpec{ + Audiences: []string{c.audience}, + Token: gi.Token, + }, + } + + createCtx, createCancel := context.WithTimeout(ctx, 30*time.Second) + defer createCancel() + + if err := c.k8sClient.Create(createCtx, tokenReview); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error creating TokenReview: %v", err)) + } + + if !tokenReview.Status.Authenticated { + return nil, status.Error(codes.Unauthenticated, fmt.Sprintf("invalid authorization: %s", tokenReview.Status.Error)) + } + + usernameItems := strings.Split(tokenReview.Status.User.Username, ":") + if len(usernameItems) != 4 || usernameItems[0] != "system" || usernameItems[1] != "serviceaccount" { + msg := fmt.Sprintf( + "token username must be of the format 'system:serviceaccount:NAMESPACE:NAME': %s", + tokenReview.Status.User.Username, + ) + 
return nil, status.Error(codes.Unauthenticated, msg) + } + + getCtx, getCancel := context.WithTimeout(ctx, 30*time.Second) + defer getCancel() + + var podList corev1.PodList + opts := &client.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"status.podIP": gi.IPAddress}), + } + + if err := c.k8sClient.List(getCtx, &podList, opts); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error listing pods: %s", err.Error())) + } + + if len(podList.Items) != 1 { + msg := fmt.Sprintf("expected one Pod to have IP address %s, found %d", gi.IPAddress, len(podList.Items)) + return nil, status.Error(codes.Internal, msg) + } + + podNS := podList.Items[0].GetNamespace() + if podNS != usernameItems[2] { + msg := fmt.Sprintf( + "token user namespace %q does not match namespace of requesting pod %q", usernameItems[2], podNS, + ) + return nil, status.Error(codes.Unauthenticated, msg) + } + + scName, ok := podList.Items[0].GetLabels()[controller.AppNameLabel] + if !ok { + msg := fmt.Sprintf("could not get app name from %q label; unable to authenticate token", controller.AppNameLabel) + return nil, status.Error(codes.Unauthenticated, msg) + } + + if scName != usernameItems[3] { + msg := fmt.Sprintf( + "token user name %q does not match service account name of requesting pod %q", usernameItems[3], scName, + ) + return nil, status.Error(codes.Unauthenticated, msg) + } + + return grpcContext.NewGrpcContext(ctx, *gi), nil +} diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go new file mode 100644 index 0000000000..04eda6ad50 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go @@ -0,0 +1,292 @@ +package interceptor + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" +) + +type mockServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (m *mockServerStream) Context() context.Context { + return m.ctx +} + +type mockClient struct { + client.Client + createErr, listErr error + username, appName, podNamespace string + authenticated bool +} + +func (m *mockClient) Create(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + tr, ok := obj.(*authv1.TokenReview) + if !ok { + return errors.New("couldn't convert object to TokenReview") + } + tr.Status.Authenticated = m.authenticated + tr.Status.User = authv1.UserInfo{Username: m.username} + + return m.createErr +} + +func (m *mockClient) List(_ context.Context, obj client.ObjectList, _ ...client.ListOption) error { + podList, ok := obj.(*corev1.PodList) + if !ok { + return errors.New("couldn't convert object to PodList") + } + + var labels map[string]string + if m.appName != "" { + labels = map[string]string{ + controller.AppNameLabel: m.appName, + } + } + + podList.Items = []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: m.podNamespace, + Labels: labels, + }, + }, + } + + return m.listErr +} + +func TestInterceptor(t *testing.T) { + t.Parallel() + + validMetadata := metadata.New(map[string]string{ + headerUUID: "test-uuid", + headerAuth: "test-token", + }) + validPeerData := &peer.Peer{ + Addr: &net.TCPAddr{IP: net.ParseIP("127.0.0.1")}, + } + + tests := []struct { + md metadata.MD + peer *peer.Peer + createErr error + listErr error + username string + appName string + podNamespace string + name string + expErrMsg 
string + authenticated bool + expErrCode codes.Code + }{ + { + name: "valid request", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.OK, + }, + { + name: "missing metadata", + peer: validPeerData, + authenticated: true, + expErrCode: codes.InvalidArgument, + expErrMsg: "no metadata", + }, + { + name: "missing uuid", + md: metadata.New(map[string]string{ + headerAuth: "test-token", + }), + peer: validPeerData, + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "no identity", + }, + { + name: "missing authorization", + md: metadata.New(map[string]string{ + headerUUID: "test-uuid", + }), + peer: validPeerData, + authenticated: true, + createErr: nil, + expErrCode: codes.Unauthenticated, + expErrMsg: "no authorization", + }, + { + name: "missing peer data", + md: validMetadata, + authenticated: true, + expErrCode: codes.InvalidArgument, + expErrMsg: "no peer data", + }, + { + name: "tokenreview not created", + md: validMetadata, + peer: validPeerData, + authenticated: true, + createErr: errors.New("not created"), + expErrCode: codes.Internal, + expErrMsg: "error creating TokenReview", + }, + { + name: "tokenreview created and not authenticated", + md: validMetadata, + peer: validPeerData, + authenticated: false, + expErrCode: codes.Unauthenticated, + expErrMsg: "invalid authorization", + }, + { + name: "error listing pods", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + listErr: errors.New("can't list"), + expErrCode: codes.Internal, + expErrMsg: "error listing pods", + }, + { + name: "invalid username length", + md: validMetadata, + peer: validPeerData, + username: "serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + 
authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "missing system from username", + md: validMetadata, + peer: validPeerData, + username: "invalid:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "missing serviceaccount from username", + md: validMetadata, + peer: validPeerData, + username: "system:invalid:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "mismatched namespace in username", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:invalid:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "does not match namespace", + }, + { + name: "mismatched name in username", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:invalid", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "does not match service account name", + }, + { + name: "missing app name label", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "could not get app name", + }, + } + + streamHandler := func(_ any, _ grpc.ServerStream) error { + return nil + } + + unaryHandler := func(_ context.Context, _ any) (any, error) { + return nil, nil //nolint:nilnil // unit test + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + mockK8sClient := &mockClient{ + authenticated: test.authenticated, + createErr: 
test.createErr, + listErr: test.listErr, + username: test.username, + appName: test.appName, + podNamespace: test.podNamespace, + } + cs := NewContextSetter(mockK8sClient, "ngf-audience") + + ctx := context.Background() + if test.md != nil { + peerCtx := context.Background() + if test.peer != nil { + peerCtx = peer.NewContext(context.Background(), test.peer) + } + ctx = metadata.NewIncomingContext(peerCtx, test.md) + } + + stream := &mockServerStream{ctx: ctx} + + err := cs.Stream(logr.Discard())(nil, stream, nil, streamHandler) + if test.expErrCode != codes.OK { + g.Expect(status.Code(err)).To(Equal(test.expErrCode)) + g.Expect(err.Error()).To(ContainSubstring(test.expErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + _, err = cs.Unary(logr.Discard())(ctx, nil, nil, unaryHandler) + if test.expErrCode != codes.OK { + g.Expect(status.Code(err)).To(Equal(test.expErrCode)) + g.Expect(err.Error()).To(ContainSubstring(test.expErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + }) + } +} diff --git a/internal/mode/static/nginx/agent/grpc/messenger/doc.go b/internal/mode/static/nginx/agent/grpc/messenger/doc.go new file mode 100644 index 0000000000..60150e4ad8 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/doc.go @@ -0,0 +1,4 @@ +/* +Package messenger provides a wrapper around a gRPC stream with the nginx agent. +*/ +package messenger diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messenger.go b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go new file mode 100644 index 0000000000..7e7fbd2b4c --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go @@ -0,0 +1,111 @@ +package messenger + +import ( + "context" + "errors" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . Messenger + +// Messenger is a wrapper around a gRPC stream with the nginx agent. 
+type Messenger interface { + Run(context.Context) + Send(context.Context, *pb.ManagementPlaneRequest) error + Messages() <-chan *pb.DataPlaneResponse + Errors() <-chan error +} + +// NginxAgentMessenger is the implementation of the Messenger interface. +type NginxAgentMessenger struct { + incoming chan *pb.ManagementPlaneRequest + outgoing chan *pb.DataPlaneResponse + errorCh chan error + server pb.CommandService_SubscribeServer +} + +// New returns a new Messenger instance. +func New(server pb.CommandService_SubscribeServer) Messenger { + return &NginxAgentMessenger{ + incoming: make(chan *pb.ManagementPlaneRequest), + outgoing: make(chan *pb.DataPlaneResponse), + errorCh: make(chan error), + server: server, + } +} + +// Run starts the Messenger to listen for any Send() or Recv() events over the stream. +func (m *NginxAgentMessenger) Run(ctx context.Context) { + go m.handleRecv(ctx) + m.handleSend(ctx) +} + +// Send a message, will return error if the context is Done. +func (m *NginxAgentMessenger) Send(ctx context.Context, msg *pb.ManagementPlaneRequest) error { + select { + case <-ctx.Done(): + return ctx.Err() + case m.incoming <- msg: + } + return nil +} + +func (m *NginxAgentMessenger) handleSend(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case msg := <-m.incoming: + err := m.server.Send(msg) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(ctx.Err(), context.Canceled) { + return + } + m.errorCh <- err + + return + } + } + } +} + +// Messages returns the data plane response channel. +func (m *NginxAgentMessenger) Messages() <-chan *pb.DataPlaneResponse { + return m.outgoing +} + +// Errors returns the error channel. +func (m *NginxAgentMessenger) Errors() <-chan error { + return m.errorCh +} + +// handleRecv handles an incoming message from the nginx agent. +// It blocks until Recv returns. The result from the Recv is either going to Error or Messages channel. 
+func (m *NginxAgentMessenger) handleRecv(ctx context.Context) { + for { + msg, err := m.server.Recv() + if err != nil { + select { + case <-ctx.Done(): + return + case m.errorCh <- err: + } + return + } + + if msg == nil { + // close the outgoing channel to signal no more messages to be sent + close(m.outgoing) + return + } + + select { + case <-ctx.Done(): + return + case m.outgoing <- msg: + } + } +} diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go b/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go new file mode 100644 index 0000000000..275f2ed875 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go @@ -0,0 +1,125 @@ +package messenger_test + +import ( + "context" + "errors" + "testing" + + v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/grpc" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" +) + +type mockServer struct { + grpc.ServerStream + sendChan chan *v1.ManagementPlaneRequest + recvChan chan *v1.DataPlaneResponse +} + +func (m *mockServer) Send(msg *v1.ManagementPlaneRequest) error { + m.sendChan <- msg + return nil +} + +func (m *mockServer) Recv() (*v1.DataPlaneResponse, error) { + msg, ok := <-m.recvChan + if !ok { + return nil, errors.New("channel closed") + } + return msg, nil +} + +type mockErrorServer struct { + grpc.ServerStream + sendChan chan *v1.ManagementPlaneRequest + recvChan chan *v1.DataPlaneResponse +} + +func (m *mockErrorServer) Send(_ *v1.ManagementPlaneRequest) error { + return errors.New("error sending to server") +} + +func (m *mockErrorServer) Recv() (*v1.DataPlaneResponse, error) { + <-m.recvChan + return nil, errors.New("error received from server") +} + +func createServer() *mockServer { + return &mockServer{ + sendChan: make(chan *v1.ManagementPlaneRequest, 1), + recvChan: make(chan *v1.DataPlaneResponse, 1), + } +} + +func createErrorServer() 
*mockErrorServer { + return &mockErrorServer{ + sendChan: make(chan *v1.ManagementPlaneRequest, 1), + recvChan: make(chan *v1.DataPlaneResponse, 1), + } +} + +func TestSend(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + msg := &v1.ManagementPlaneRequest{ + MessageMeta: &v1.MessageMeta{ + MessageId: "test", + }, + } + g.Expect(msgr.Send(ctx, msg)).To(Succeed()) + + g.Eventually(server.sendChan).Should(Receive(Equal(msg))) + + cancel() + + g.Expect(msgr.Send(ctx, &v1.ManagementPlaneRequest{})).ToNot(Succeed()) +} + +func TestMessages(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + msg := &v1.DataPlaneResponse{InstanceId: "test"} + server.recvChan <- msg + + g.Eventually(msgr.Messages()).Should(Receive(Equal(msg))) +} + +func TestErrors(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createErrorServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + g.Expect(msgr.Send(ctx, &v1.ManagementPlaneRequest{})).To(Succeed()) + g.Eventually(msgr.Errors()).Should(Receive(MatchError("error sending to server"))) + + server.recvChan <- &v1.DataPlaneResponse{} + + g.Eventually(msgr.Errors()).Should(Receive(MatchError("error received from server"))) +} diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go b/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go new file mode 100644 index 0000000000..6b6a97bef9 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go @@ -0,0 +1,284 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package messengerfakes + +import ( + "context" + "sync" + + v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" +) + +type FakeMessenger struct { + ErrorsStub func() <-chan error + errorsMutex sync.RWMutex + errorsArgsForCall []struct { + } + errorsReturns struct { + result1 <-chan error + } + errorsReturnsOnCall map[int]struct { + result1 <-chan error + } + MessagesStub func() <-chan *v1.DataPlaneResponse + messagesMutex sync.RWMutex + messagesArgsForCall []struct { + } + messagesReturns struct { + result1 <-chan *v1.DataPlaneResponse + } + messagesReturnsOnCall map[int]struct { + result1 <-chan *v1.DataPlaneResponse + } + RunStub func(context.Context) + runMutex sync.RWMutex + runArgsForCall []struct { + arg1 context.Context + } + SendStub func(context.Context, *v1.ManagementPlaneRequest) error + sendMutex sync.RWMutex + sendArgsForCall []struct { + arg1 context.Context + arg2 *v1.ManagementPlaneRequest + } + sendReturns struct { + result1 error + } + sendReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeMessenger) Errors() <-chan error { + fake.errorsMutex.Lock() + ret, specificReturn := fake.errorsReturnsOnCall[len(fake.errorsArgsForCall)] + fake.errorsArgsForCall = append(fake.errorsArgsForCall, struct { + }{}) + stub := fake.ErrorsStub + fakeReturns := fake.errorsReturns + fake.recordInvocation("Errors", []interface{}{}) + fake.errorsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) ErrorsCallCount() int { + fake.errorsMutex.RLock() + defer fake.errorsMutex.RUnlock() + return len(fake.errorsArgsForCall) +} + +func (fake *FakeMessenger) ErrorsCalls(stub func() <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = stub +} + +func 
(fake *FakeMessenger) ErrorsReturns(result1 <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = nil + fake.errorsReturns = struct { + result1 <-chan error + }{result1} +} + +func (fake *FakeMessenger) ErrorsReturnsOnCall(i int, result1 <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = nil + if fake.errorsReturnsOnCall == nil { + fake.errorsReturnsOnCall = make(map[int]struct { + result1 <-chan error + }) + } + fake.errorsReturnsOnCall[i] = struct { + result1 <-chan error + }{result1} +} + +func (fake *FakeMessenger) Messages() <-chan *v1.DataPlaneResponse { + fake.messagesMutex.Lock() + ret, specificReturn := fake.messagesReturnsOnCall[len(fake.messagesArgsForCall)] + fake.messagesArgsForCall = append(fake.messagesArgsForCall, struct { + }{}) + stub := fake.MessagesStub + fakeReturns := fake.messagesReturns + fake.recordInvocation("Messages", []interface{}{}) + fake.messagesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) MessagesCallCount() int { + fake.messagesMutex.RLock() + defer fake.messagesMutex.RUnlock() + return len(fake.messagesArgsForCall) +} + +func (fake *FakeMessenger) MessagesCalls(stub func() <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = stub +} + +func (fake *FakeMessenger) MessagesReturns(result1 <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = nil + fake.messagesReturns = struct { + result1 <-chan *v1.DataPlaneResponse + }{result1} +} + +func (fake *FakeMessenger) MessagesReturnsOnCall(i int, result1 <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = nil + if fake.messagesReturnsOnCall == nil { + fake.messagesReturnsOnCall = 
make(map[int]struct { + result1 <-chan *v1.DataPlaneResponse + }) + } + fake.messagesReturnsOnCall[i] = struct { + result1 <-chan *v1.DataPlaneResponse + }{result1} +} + +func (fake *FakeMessenger) Run(arg1 context.Context) { + fake.runMutex.Lock() + fake.runArgsForCall = append(fake.runArgsForCall, struct { + arg1 context.Context + }{arg1}) + stub := fake.RunStub + fake.recordInvocation("Run", []interface{}{arg1}) + fake.runMutex.Unlock() + if stub != nil { + fake.RunStub(arg1) + } +} + +func (fake *FakeMessenger) RunCallCount() int { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + return len(fake.runArgsForCall) +} + +func (fake *FakeMessenger) RunCalls(stub func(context.Context)) { + fake.runMutex.Lock() + defer fake.runMutex.Unlock() + fake.RunStub = stub +} + +func (fake *FakeMessenger) RunArgsForCall(i int) context.Context { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + argsForCall := fake.runArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeMessenger) Send(arg1 context.Context, arg2 *v1.ManagementPlaneRequest) error { + fake.sendMutex.Lock() + ret, specificReturn := fake.sendReturnsOnCall[len(fake.sendArgsForCall)] + fake.sendArgsForCall = append(fake.sendArgsForCall, struct { + arg1 context.Context + arg2 *v1.ManagementPlaneRequest + }{arg1, arg2}) + stub := fake.SendStub + fakeReturns := fake.sendReturns + fake.recordInvocation("Send", []interface{}{arg1, arg2}) + fake.sendMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) SendCallCount() int { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + return len(fake.sendArgsForCall) +} + +func (fake *FakeMessenger) SendCalls(stub func(context.Context, *v1.ManagementPlaneRequest) error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = stub +} + +func (fake *FakeMessenger) SendArgsForCall(i int) (context.Context, 
*v1.ManagementPlaneRequest) { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + argsForCall := fake.sendArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeMessenger) SendReturns(result1 error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + fake.sendReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeMessenger) SendReturnsOnCall(i int, result1 error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + if fake.sendReturnsOnCall == nil { + fake.sendReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.sendReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeMessenger) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.errorsMutex.RLock() + defer fake.errorsMutex.RUnlock() + fake.messagesMutex.RLock() + defer fake.messagesMutex.RUnlock() + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeMessenger) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ messenger.Messenger = new(FakeMessenger) diff --git a/internal/mode/static/nginx/config/configfakes/fake_generator.go b/internal/mode/static/nginx/config/configfakes/fake_generator.go index d92b09e7e4..0dc5ac408f 100644 --- a/internal/mode/static/nginx/config/configfakes/fake_generator.go +++ 
b/internal/mode/static/nginx/config/configfakes/fake_generator.go @@ -4,41 +4,41 @@ package configfakes import ( "sync" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) type FakeGenerator struct { - GenerateStub func(dataplane.Configuration) []file.File + GenerateStub func(dataplane.Configuration) []agent.File generateMutex sync.RWMutex generateArgsForCall []struct { arg1 dataplane.Configuration } generateReturns struct { - result1 []file.File + result1 []agent.File } generateReturnsOnCall map[int]struct { - result1 []file.File + result1 []agent.File } - GenerateDeploymentContextStub func(dataplane.DeploymentContext) (file.File, error) + GenerateDeploymentContextStub func(dataplane.DeploymentContext) (agent.File, error) generateDeploymentContextMutex sync.RWMutex generateDeploymentContextArgsForCall []struct { arg1 dataplane.DeploymentContext } generateDeploymentContextReturns struct { - result1 file.File + result1 agent.File result2 error } generateDeploymentContextReturnsOnCall map[int]struct { - result1 file.File + result1 agent.File result2 error } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []file.File { +func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []agent.File { fake.generateMutex.Lock() ret, specificReturn := fake.generateReturnsOnCall[len(fake.generateArgsForCall)] fake.generateArgsForCall = append(fake.generateArgsForCall, struct { @@ -63,7 +63,7 @@ func (fake *FakeGenerator) GenerateCallCount() int { return len(fake.generateArgsForCall) } -func (fake *FakeGenerator) GenerateCalls(stub func(dataplane.Configuration) []file.File) { +func (fake *FakeGenerator) GenerateCalls(stub 
func(dataplane.Configuration) []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = stub @@ -76,30 +76,30 @@ func (fake *FakeGenerator) GenerateArgsForCall(i int) dataplane.Configuration { return argsForCall.arg1 } -func (fake *FakeGenerator) GenerateReturns(result1 []file.File) { +func (fake *FakeGenerator) GenerateReturns(result1 []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = nil fake.generateReturns = struct { - result1 []file.File + result1 []agent.File }{result1} } -func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []file.File) { +func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = nil if fake.generateReturnsOnCall == nil { fake.generateReturnsOnCall = make(map[int]struct { - result1 []file.File + result1 []agent.File }) } fake.generateReturnsOnCall[i] = struct { - result1 []file.File + result1 []agent.File }{result1} } -func (fake *FakeGenerator) GenerateDeploymentContext(arg1 dataplane.DeploymentContext) (file.File, error) { +func (fake *FakeGenerator) GenerateDeploymentContext(arg1 dataplane.DeploymentContext) (agent.File, error) { fake.generateDeploymentContextMutex.Lock() ret, specificReturn := fake.generateDeploymentContextReturnsOnCall[len(fake.generateDeploymentContextArgsForCall)] fake.generateDeploymentContextArgsForCall = append(fake.generateDeploymentContextArgsForCall, struct { @@ -124,7 +124,7 @@ func (fake *FakeGenerator) GenerateDeploymentContextCallCount() int { return len(fake.generateDeploymentContextArgsForCall) } -func (fake *FakeGenerator) GenerateDeploymentContextCalls(stub func(dataplane.DeploymentContext) (file.File, error)) { +func (fake *FakeGenerator) GenerateDeploymentContextCalls(stub func(dataplane.DeploymentContext) (agent.File, error)) { fake.generateDeploymentContextMutex.Lock() defer 
fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = stub @@ -137,28 +137,28 @@ func (fake *FakeGenerator) GenerateDeploymentContextArgsForCall(i int) dataplane return argsForCall.arg1 } -func (fake *FakeGenerator) GenerateDeploymentContextReturns(result1 file.File, result2 error) { +func (fake *FakeGenerator) GenerateDeploymentContextReturns(result1 agent.File, result2 error) { fake.generateDeploymentContextMutex.Lock() defer fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = nil fake.generateDeploymentContextReturns = struct { - result1 file.File + result1 agent.File result2 error }{result1, result2} } -func (fake *FakeGenerator) GenerateDeploymentContextReturnsOnCall(i int, result1 file.File, result2 error) { +func (fake *FakeGenerator) GenerateDeploymentContextReturnsOnCall(i int, result1 agent.File, result2 error) { fake.generateDeploymentContextMutex.Lock() defer fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = nil if fake.generateDeploymentContextReturnsOnCall == nil { fake.generateDeploymentContextReturnsOnCall = make(map[int]struct { - result1 file.File + result1 agent.File result2 error }) } fake.generateDeploymentContextReturnsOnCall[i] = struct { - result1 file.File + result1 agent.File result2 error }{result1, result2} } diff --git a/internal/mode/static/nginx/config/convert.go b/internal/mode/static/nginx/config/convert.go deleted file mode 100644 index ece4e1b5c2..0000000000 --- a/internal/mode/static/nginx/config/convert.go +++ /dev/null @@ -1,58 +0,0 @@ -package config - -import ( - "fmt" - - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" -) - -// ConvertEndpoints converts a list of Endpoints into a list of NGINX Plus SDK UpstreamServers. 
-func ConvertEndpoints(eps []resolver.Endpoint) []ngxclient.UpstreamServer { - servers := make([]ngxclient.UpstreamServer, 0, len(eps)) - - for _, ep := range eps { - port, format := getPortAndIPFormat(ep) - - server := ngxclient.UpstreamServer{ - Server: fmt.Sprintf(format, ep.Address, port), - } - - servers = append(servers, server) - } - - return servers -} - -// ConvertStreamEndpoints converts a list of Endpoints into a list of NGINX Plus SDK StreamUpstreamServers. -func ConvertStreamEndpoints(eps []resolver.Endpoint) []ngxclient.StreamUpstreamServer { - servers := make([]ngxclient.StreamUpstreamServer, 0, len(eps)) - - for _, ep := range eps { - port, format := getPortAndIPFormat(ep) - - server := ngxclient.StreamUpstreamServer{ - Server: fmt.Sprintf(format, ep.Address, port), - } - - servers = append(servers, server) - } - - return servers -} - -func getPortAndIPFormat(ep resolver.Endpoint) (string, string) { - var port string - - if ep.Port != 0 { - port = fmt.Sprintf(":%d", ep.Port) - } - - format := "%s%s" - if ep.IPv6 { - format = "[%s]%s" - } - - return port, format -} diff --git a/internal/mode/static/nginx/config/convert_test.go b/internal/mode/static/nginx/config/convert_test.go deleted file mode 100644 index 312b3d41c1..0000000000 --- a/internal/mode/static/nginx/config/convert_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package config - -import ( - "testing" - - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" -) - -func TestConvertEndpoints(t *testing.T) { - t.Parallel() - endpoints := []resolver.Endpoint{ - { - Address: "1.2.3.4", - Port: 80, - }, - { - Address: "5.6.7.8", - Port: 0, - }, - { - Address: "2001:db8::1", - Port: 443, - IPv6: true, - }, - } - - expUpstreams := []ngxclient.UpstreamServer{ - { - Server: "1.2.3.4:80", - }, - { - Server: "5.6.7.8", - }, - { - Server: "[2001:db8::1]:443", - }, - } - - g := NewWithT(t) - g.Expect(ConvertEndpoints(endpoints)).To(Equal(expUpstreams)) -} - -func TestConvertStreamEndpoints(t *testing.T) { - t.Parallel() - endpoints := []resolver.Endpoint{ - { - Address: "1.2.3.4", - Port: 80, - }, - { - Address: "5.6.7.8", - Port: 0, - }, - { - Address: "2001:db8::1", - Port: 443, - IPv6: true, - }, - } - - expUpstreams := []ngxclient.StreamUpstreamServer{ - { - Server: "1.2.3.4:80", - }, - { - Server: "5.6.7.8", - }, - { - Server: "[2001:db8::1]:443", - }, - } - - g := NewWithT(t) - g.Expect(ConvertStreamEndpoints(endpoints)).To(Equal(expUpstreams)) -} diff --git a/internal/mode/static/nginx/config/generator.go b/internal/mode/static/nginx/config/generator.go index 49be7696d2..5e92544c2d 100644 --- a/internal/mode/static/nginx/config/generator.go +++ b/internal/mode/static/nginx/config/generator.go @@ -6,14 +6,17 @@ import ( "path/filepath" "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/http" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" 
"github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/observability" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/upstreamsettings" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) @@ -47,9 +50,6 @@ const ( // streamConfigFile is the path to the configuration file with Stream configuration. streamConfigFile = streamFolder + "/stream.conf" - // configVersionFile is the path to the config version configuration file. - configVersionFile = httpFolder + "/config-version.conf" - // httpMatchVarsFile is the path to the http_match pairs configuration file. httpMatchVarsFile = httpFolder + "/matches.json" @@ -63,17 +63,13 @@ const ( nginxPlusConfigFile = httpFolder + "/plus-api.conf" ) -// ConfigFolders is a list of folders where NGINX configuration files are stored. -// Volumes here also need to be added to our crossplane ephemeral test container. -var ConfigFolders = []string{httpFolder, secretsFolder, includesFolder, mainIncludesFolder, streamFolder} - // Generator generates NGINX configuration files. // This interface is used for testing purposes only. type Generator interface { // Generate generates NGINX configuration files from internal representation. - Generate(configuration dataplane.Configuration) []file.File + Generate(configuration dataplane.Configuration) []agent.File // GenerateDeploymentContext generates the deployment context used for N+ licensing. - GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (file.File, error) + GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (agent.File, error) } // GeneratorImpl is an implementation of Generator. 
@@ -113,8 +109,8 @@ type executeFunc func(configuration dataplane.Configuration) []executeResult // It is the responsibility of the caller to validate the configuration before calling this function. // In case of invalid configuration, NGINX will fail to reload or could be configured with malicious configuration. // To validate, use the validators from the validation package. -func (g GeneratorImpl) Generate(conf dataplane.Configuration) []file.File { - files := make([]file.File, 0) +func (g GeneratorImpl) Generate(conf dataplane.Configuration) []agent.File { + files := make([]agent.File, 0) for id, pair := range conf.SSLKeyPairs { files = append(files, generatePEM(id, pair.Cert, pair.Key)) @@ -136,16 +132,19 @@ func (g GeneratorImpl) Generate(conf dataplane.Configuration) []file.File { // GenerateDeploymentContext generates the deployment_ctx.json file needed for N+ licensing. // It's exported since it's used by the init container process. -func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (file.File, error) { +func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (agent.File, error) { depCtxBytes, err := json.Marshal(depCtx) if err != nil { - return file.File{}, fmt.Errorf("error building deployment context for mgmt block: %w", err) + return agent.File{}, fmt.Errorf("error building deployment context for mgmt block: %w", err) } - deploymentCtxFile := file.File{ - Content: depCtxBytes, - Path: mainIncludesFolder + "/deployment_ctx.json", - Type: file.TypeRegular, + deploymentCtxFile := agent.File{ + Meta: &pb.FileMeta{ + Name: mainIncludesFolder + "/deployment_ctx.json", + Hash: filesHelper.GenerateHash(depCtxBytes), + Permissions: file.RegularFileMode, + }, + Contents: depCtxBytes, } return deploymentCtxFile, nil @@ -154,7 +153,7 @@ func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentCont func (g GeneratorImpl) executeConfigTemplates( conf dataplane.Configuration, generator 
policies.Generator, -) []file.File { +) []agent.File { fileBytes := make(map[string][]byte) httpUpstreams := g.createUpstreams(conf.Upstreams, upstreamsettings.NewProcessor()) @@ -167,17 +166,20 @@ func (g GeneratorImpl) executeConfigTemplates( } } - var mgmtFiles []file.File + var mgmtFiles []agent.File if g.plus { mgmtFiles = g.generateMgmtFiles(conf) } - files := make([]file.File, 0, len(fileBytes)+len(mgmtFiles)) + files := make([]agent.File, 0, len(fileBytes)+len(mgmtFiles)) for fp, bytes := range fileBytes { - files = append(files, file.File{ - Path: fp, - Content: bytes, - Type: file.TypeRegular, + files = append(files, agent.File{ + Meta: &pb.FileMeta{ + Name: fp, + Hash: filesHelper.GenerateHash(bytes), + Permissions: file.RegularFileMode, + }, + Contents: bytes, }) } files = append(files, mgmtFiles...) @@ -201,21 +203,23 @@ func (g GeneratorImpl) getExecuteFuncs( g.executeStreamServers, g.executeStreamUpstreams, executeStreamMaps, - executeVersion, executePlusAPI, } } -func generatePEM(id dataplane.SSLKeyPairID, cert []byte, key []byte) file.File { +func generatePEM(id dataplane.SSLKeyPairID, cert []byte, key []byte) agent.File { c := make([]byte, 0, len(cert)+len(key)+1) c = append(c, cert...) c = append(c, '\n') c = append(c, key...) 
- return file.File{ - Content: c, - Path: generatePEMFileName(id), - Type: file.TypeSecret, + return agent.File{ + Meta: &pb.FileMeta{ + Name: generatePEMFileName(id), + Hash: filesHelper.GenerateHash(c), + Permissions: file.SecretFileMode, + }, + Contents: c, } } @@ -223,11 +227,14 @@ func generatePEMFileName(id dataplane.SSLKeyPairID) string { return filepath.Join(secretsFolder, string(id)+".pem") } -func generateCertBundle(id dataplane.CertBundleID, cert []byte) file.File { - return file.File{ - Content: cert, - Path: generateCertBundleFileName(id), - Type: file.TypeRegular, +func generateCertBundle(id dataplane.CertBundleID, cert []byte) agent.File { + return agent.File{ + Meta: &pb.FileMeta{ + Name: generateCertBundleFileName(id), + Hash: filesHelper.GenerateHash(cert), + Permissions: file.SecretFileMode, + }, + Contents: cert, } } diff --git a/internal/mode/static/nginx/config/generator_test.go b/internal/mode/static/nginx/config/generator_test.go index 79bb35a90f..9e0fe140fa 100644 --- a/internal/mode/static/nginx/config/generator_test.go +++ b/internal/mode/static/nginx/config/generator_test.go @@ -1,18 +1,20 @@ package config_test import ( - "fmt" "sort" "testing" "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" @@ -145,15 +147,14 @@ func TestGenerate(t *testing.T) { files := generator.Generate(conf) - g.Expect(files).To(HaveLen(18)) + g.Expect(files).To(HaveLen(17)) arrange := func(i, j int) bool { - return files[i].Path < files[j].Path + return files[i].Meta.Name < files[j].Meta.Name } sort.Slice(files, arrange) /* Order of files: - /etc/nginx/conf.d/config-version.conf /etc/nginx/conf.d/http.conf /etc/nginx/conf.d/matches.json /etc/nginx/conf.d/plus-api.conf @@ -173,14 +174,9 @@ func TestGenerate(t *testing.T) { /etc/nginx/stream-conf.d/stream.conf */ - g.Expect(files[0].Type).To(Equal(file.TypeRegular)) - g.Expect(files[0].Path).To(Equal("/etc/nginx/conf.d/config-version.conf")) - configVersion := string(files[0].Content) - g.Expect(configVersion).To(ContainSubstring(fmt.Sprintf("return 200 %d", conf.Version))) - - g.Expect(files[1].Type).To(Equal(file.TypeRegular)) - g.Expect(files[1].Path).To(Equal("/etc/nginx/conf.d/http.conf")) - httpCfg := string(files[1].Content) // converting to string so that on failure gomega prints strings not byte arrays + g.Expect(files[0].Meta.Permissions).To(Equal(file.RegularFileMode)) + g.Expect(files[0].Meta.Name).To(Equal("/etc/nginx/conf.d/http.conf")) + httpCfg := string(files[0].Contents) // converting to string so that on 
failure gomega prints strings not byte arrays // Note: this only verifies that Generate() returns a byte array with upstream, server, and split_client blocks. // It does not test the correctness of those blocks. That functionality is covered by other tests in this package. g.Expect(httpCfg).To(ContainSubstring("listen 80")) @@ -197,14 +193,14 @@ func TestGenerate(t *testing.T) { g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet1.conf;")) g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet2.conf;")) - g.Expect(files[2].Path).To(Equal("/etc/nginx/conf.d/matches.json")) - g.Expect(files[2].Type).To(Equal(file.TypeRegular)) + g.Expect(files[1].Meta.Name).To(Equal("/etc/nginx/conf.d/matches.json")) + g.Expect(files[1].Meta.Permissions).To(Equal(file.RegularFileMode)) expString := "{}" - g.Expect(string(files[2].Content)).To(Equal(expString)) + g.Expect(string(files[1].Contents)).To(Equal(expString)) - g.Expect(files[3].Path).To(Equal("/etc/nginx/conf.d/plus-api.conf")) - g.Expect(files[3].Type).To(Equal(file.TypeRegular)) - httpCfg = string(files[3].Content) + g.Expect(files[2].Meta.Name).To(Equal("/etc/nginx/conf.d/plus-api.conf")) + g.Expect(files[2].Meta.Permissions).To(Equal(file.RegularFileMode)) + httpCfg = string(files[2].Contents) g.Expect(httpCfg).To(ContainSubstring("listen unix:/var/run/nginx/nginx-plus-api.sock;")) g.Expect(httpCfg).To(ContainSubstring("access_log off;")) g.Expect(httpCfg).To(ContainSubstring("listen 8765;")) @@ -217,26 +213,26 @@ func TestGenerate(t *testing.T) { // snippet include files // content is not checked in this test. 
- g.Expect(files[4].Path).To(Equal("/etc/nginx/includes/http_snippet1.conf")) - g.Expect(files[5].Path).To(Equal("/etc/nginx/includes/http_snippet2.conf")) - g.Expect(files[6].Path).To(Equal("/etc/nginx/includes/main_snippet1.conf")) - g.Expect(files[7].Path).To(Equal("/etc/nginx/includes/main_snippet2.conf")) + g.Expect(files[3].Meta.Name).To(Equal("/etc/nginx/includes/http_snippet1.conf")) + g.Expect(files[4].Meta.Name).To(Equal("/etc/nginx/includes/http_snippet2.conf")) + g.Expect(files[5].Meta.Name).To(Equal("/etc/nginx/includes/main_snippet1.conf")) + g.Expect(files[6].Meta.Name).To(Equal("/etc/nginx/includes/main_snippet2.conf")) - g.Expect(files[8].Path).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) - deploymentCtx := string(files[8].Content) + g.Expect(files[7].Meta.Name).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) + deploymentCtx := string(files[7].Contents) g.Expect(deploymentCtx).To(ContainSubstring("\"integration\":\"ngf\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_id\":\"test-uid\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"installation_id\":\"test-uid-replicaSet\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_node_count\":1")) - g.Expect(files[9].Path).To(Equal("/etc/nginx/main-includes/main.conf")) - mainConfStr := string(files[9].Content) + g.Expect(files[8].Meta.Name).To(Equal("/etc/nginx/main-includes/main.conf")) + mainConfStr := string(files[8].Contents) g.Expect(mainConfStr).To(ContainSubstring("load_module modules/ngx_otel_module.so;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet1.conf;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet2.conf;")) - g.Expect(files[10].Path).To(Equal("/etc/nginx/main-includes/mgmt.conf")) - mgmtConf := string(files[10].Content) + g.Expect(files[9].Meta.Name).To(Equal("/etc/nginx/main-includes/mgmt.conf")) + mgmtConf := string(files[9].Contents) 
g.Expect(mgmtConf).To(ContainSubstring("usage_report endpoint=test-endpoint")) g.Expect(mgmtConf).To(ContainSubstring("license_token /etc/nginx/secrets/license.jwt")) g.Expect(mgmtConf).To(ContainSubstring("deployment_context /etc/nginx/main-includes/deployment_ctx.json")) @@ -244,31 +240,34 @@ func TestGenerate(t *testing.T) { g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate /etc/nginx/secrets/mgmt-tls.crt")) g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate_key /etc/nginx/secrets/mgmt-tls.key")) - g.Expect(files[11].Path).To(Equal("/etc/nginx/secrets/license.jwt")) - g.Expect(string(files[11].Content)).To(Equal("license")) + g.Expect(files[10].Meta.Name).To(Equal("/etc/nginx/secrets/license.jwt")) + g.Expect(string(files[10].Contents)).To(Equal("license")) - g.Expect(files[12].Path).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) - g.Expect(string(files[12].Content)).To(Equal("ca")) + g.Expect(files[11].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) + g.Expect(string(files[11].Contents)).To(Equal("ca")) - g.Expect(files[13].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) - g.Expect(string(files[13].Content)).To(Equal("cert")) + g.Expect(files[12].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) + g.Expect(string(files[12].Contents)).To(Equal("cert")) - g.Expect(files[14].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) - g.Expect(string(files[14].Content)).To(Equal("key")) + g.Expect(files[13].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) + g.Expect(string(files[13].Contents)).To(Equal("key")) - g.Expect(files[15].Path).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) - certBundle := string(files[15].Content) + g.Expect(files[14].Meta.Name).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) + certBundle := string(files[14].Contents) g.Expect(certBundle).To(Equal("test-cert")) - g.Expect(files[16]).To(Equal(file.File{ - Type: file.TypeSecret, - Path: "/etc/nginx/secrets/test-keypair.pem", - Content: 
[]byte("test-cert\ntest-key"), + g.Expect(files[15]).To(Equal(agent.File{ + Meta: &pb.FileMeta{ + Name: "/etc/nginx/secrets/test-keypair.pem", + Hash: filesHelper.GenerateHash([]byte("test-cert\ntest-key")), + Permissions: file.SecretFileMode, + }, + Contents: []byte("test-cert\ntest-key"), })) - g.Expect(files[17].Path).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) - g.Expect(files[17].Type).To(Equal(file.TypeRegular)) - streamCfg := string(files[17].Content) + g.Expect(files[16].Meta.Name).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) + g.Expect(files[16].Meta.Permissions).To(Equal(file.RegularFileMode)) + streamCfg := string(files[16].Contents) g.Expect(streamCfg).To(ContainSubstring("listen unix:/var/run/nginx/app.example.com-443.sock")) g.Expect(streamCfg).To(ContainSubstring("listen 443")) g.Expect(streamCfg).To(ContainSubstring("app.example.com unix:/var/run/nginx/app.example.com-443.sock")) diff --git a/internal/mode/static/nginx/config/main_config.go b/internal/mode/static/nginx/config/main_config.go index 8355c5d0db..edb837d2bc 100644 --- a/internal/mode/static/nginx/config/main_config.go +++ b/internal/mode/static/nginx/config/main_config.go @@ -3,9 +3,13 @@ package config import ( gotemplate "text/template" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/shared" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) @@ -50,7 +54,7 @@ type mgmtConf struct { // generateMgmtFiles generates the NGINX Plus configuration file for the mgmt block. 
As part of this, // it writes the secret and deployment context files that are referenced in the mgmt block. -func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.File { +func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []agent.File { if !g.plus { return nil } @@ -60,47 +64,59 @@ func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.Fi panic("nginx plus token not set in expected map") } - tokenFile := file.File{ - Content: tokenContent, - Path: secretsFolder + "/license.jwt", - Type: file.TypeSecret, + tokenFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/license.jwt", + Hash: filesHelper.GenerateHash(tokenContent), + Permissions: file.SecretFileMode, + }, + Contents: tokenContent, } - files := []file.File{tokenFile} + files := []agent.File{tokenFile} cfg := mgmtConf{ Endpoint: g.usageReportConfig.Endpoint, Resolver: g.usageReportConfig.Resolver, - LicenseTokenFile: tokenFile.Path, + LicenseTokenFile: tokenFile.Meta.Name, SkipVerify: g.usageReportConfig.SkipVerify, } if content, ok := conf.AuxiliarySecrets[graph.PlusReportCACertificate]; ok { - caFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-ca.crt", - Type: file.TypeSecret, + caFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-ca.crt", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.CACertFile = caFile.Path + cfg.CACertFile = caFile.Meta.Name files = append(files, caFile) } if content, ok := conf.AuxiliarySecrets[graph.PlusReportClientSSLCertificate]; ok { - certFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-tls.crt", - Type: file.TypeSecret, + certFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-tls.crt", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.ClientSSLCertFile = certFile.Path + 
cfg.ClientSSLCertFile = certFile.Meta.Name files = append(files, certFile) } if content, ok := conf.AuxiliarySecrets[graph.PlusReportClientSSLKey]; ok { - keyFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-tls.key", - Type: file.TypeSecret, + keyFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-tls.key", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.ClientSSLKeyFile = keyFile.Path + cfg.ClientSSLKeyFile = keyFile.Meta.Name files = append(files, keyFile) } @@ -111,10 +127,14 @@ func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.Fi files = append(files, deploymentCtxFile) } - mgmtBlockFile := file.File{ - Content: helpers.MustExecuteTemplate(mgmtConfigTemplate, cfg), - Path: mgmtIncludesFile, - Type: file.TypeRegular, + mgmtContents := helpers.MustExecuteTemplate(mgmtConfigTemplate, cfg) + mgmtBlockFile := agent.File{ + Meta: &pb.FileMeta{ + Name: mgmtIncludesFile, + Hash: filesHelper.GenerateHash(mgmtContents), + Permissions: file.RegularFileMode, + }, + Contents: mgmtContents, } return append(files, mgmtBlockFile) diff --git a/internal/mode/static/nginx/config/plus_api.go b/internal/mode/static/nginx/config/plus_api.go index 9b1894fe30..d4988bb838 100644 --- a/internal/mode/static/nginx/config/plus_api.go +++ b/internal/mode/static/nginx/config/plus_api.go @@ -10,15 +10,15 @@ import ( var plusAPITemplate = gotemplate.Must(gotemplate.New("plusAPI").Parse(plusAPITemplateText)) func executePlusAPI(conf dataplane.Configuration) []executeResult { - result := executeResult{ - dest: nginxPlusConfigFile, - } + var result executeResult // if AllowedAddresses is empty, it means that we are not running on nginx plus, and we don't want this generated if conf.NginxPlus.AllowedAddresses != nil { result = executeResult{ dest: nginxPlusConfigFile, data: helpers.MustExecuteTemplate(plusAPITemplate, conf.NginxPlus), } + } else { + return nil } 
return []executeResult{result} diff --git a/internal/mode/static/nginx/config/plus_api_test.go b/internal/mode/static/nginx/config/plus_api_test.go index 6afb79142a..f664143402 100644 --- a/internal/mode/static/nginx/config/plus_api_test.go +++ b/internal/mode/static/nginx/config/plus_api_test.go @@ -43,21 +43,7 @@ func TestExecutePlusAPI_EmptyNginxPlus(t *testing.T) { } g := NewWithT(t) - expSubStrings := map[string]int{ - "listen unix:/var/run/nginx/nginx-plus-api.sock;": 0, - "access_log off;": 0, - "api write=on;": 0, - "listen 8765;": 0, - "root /usr/share/nginx/html;": 0, - "allow 127.0.0.1;": 0, - "deny all;": 0, - "location = /dashboard.html {}": 0, - "api write=off;": 0, - } - for expSubStr, expCount := range expSubStrings { - res := executePlusAPI(conf) - g.Expect(res).To(HaveLen(1)) - g.Expect(expCount).To(Equal(strings.Count(string(res[0].data), expSubStr))) - } + res := executePlusAPI(conf) + g.Expect(res).To(BeNil()) } diff --git a/internal/mode/static/nginx/config/policies/clientsettings/validator.go b/internal/mode/static/nginx/config/policies/clientsettings/validator.go index 98198eb264..7c450b2379 100644 --- a/internal/mode/static/nginx/config/policies/clientsettings/validator.go +++ b/internal/mode/static/nginx/config/policies/clientsettings/validator.go @@ -25,7 +25,7 @@ func NewValidator(genericValidator validation.GenericValidator) *Validator { } // Validate validates the spec of a ClientSettingsPolicy. -func (v *Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { +func (v *Validator) Validate(policy policies.Policy) []conditions.Condition { csp := helpers.MustCastObject[*ngfAPI.ClientSettingsPolicy](policy) targetRefPath := field.NewPath("spec").Child("targetRef") @@ -43,6 +43,14 @@ func (v *Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) return nil } +// ValidateGlobalSettings validates a ClientSettingsPolicy with respect to the NginxProxy global settings. 
+func (v *Validator) ValidateGlobalSettings( + _ policies.Policy, + _ *policies.GlobalSettings, +) []conditions.Condition { + return nil +} + // Conflicts returns true if the two ClientSettingsPolicies conflict. func (v *Validator) Conflicts(polA, polB policies.Policy) bool { cspA := helpers.MustCastObject[*ngfAPI.ClientSettingsPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go b/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go index bce96d81c8..88b99ba292 100644 --- a/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go +++ b/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go @@ -143,7 +143,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, nil) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -154,7 +154,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := clientsettings.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -162,6 +162,15 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + v := clientsettings.NewValidator(validation.GenericValidator{}) + + g.Expect(v.ValidateGlobalSettings(nil, nil)).To(BeNil()) +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/observability/validator.go b/internal/mode/static/nginx/config/policies/observability/validator.go index 4d7182e128..798b3099c1 100644 --- a/internal/mode/static/nginx/config/policies/observability/validator.go +++ b/internal/mode/static/nginx/config/policies/observability/validator.go @@ -25,24 +25,9 @@ func NewValidator(genericValidator 
validation.GenericValidator) *Validator { } // Validate validates the spec of an ObservabilityPolicy. -func (v *Validator) Validate( - policy policies.Policy, - globalSettings *policies.GlobalSettings, -) []conditions.Condition { +func (v *Validator) Validate(policy policies.Policy) []conditions.Condition { obs := helpers.MustCastObject[*ngfAPIv1alpha2.ObservabilityPolicy](policy) - if globalSettings == nil || !globalSettings.NginxProxyValid { - return []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - } - } - - if !globalSettings.TelemetryEnabled { - return []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), - } - } - targetRefPath := field.NewPath("spec").Child("targetRefs") supportedKinds := []gatewayv1.Kind{kinds.HTTPRoute, kinds.GRPCRoute} supportedGroups := []gatewayv1.Group{gatewayv1.GroupName} @@ -60,6 +45,26 @@ func (v *Validator) Validate( return nil } +// ValidateGlobalSettings validates an ObservabilityPolicy with respect to the NginxProxy global settings. +func (v *Validator) ValidateGlobalSettings( + _ policies.Policy, + globalSettings *policies.GlobalSettings, +) []conditions.Condition { + if globalSettings == nil { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), + } + } + + if !globalSettings.TelemetryEnabled { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + } + } + + return nil +} + // Conflicts returns true if the two ObservabilityPolicies conflict. 
func (v *Validator) Conflicts(polA, polB policies.Policy) bool { a := helpers.MustCastObject[*ngfAPIv1alpha2.ObservabilityPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/observability/validator_test.go b/internal/mode/static/nginx/config/policies/observability/validator_test.go index 5b0894110d..9736320545 100644 --- a/internal/mode/static/nginx/config/policies/observability/validator_test.go +++ b/internal/mode/static/nginx/config/policies/observability/validator_test.go @@ -54,47 +54,18 @@ func createModifiedPolicy(mod policyModFunc) *ngfAPIv1alpha2.ObservabilityPolicy func TestValidator_Validate(t *testing.T) { t.Parallel() - globalSettings := &policies.GlobalSettings{ - NginxProxyValid: true, - TelemetryEnabled: true, - } tests := []struct { - name string - policy *ngfAPIv1alpha2.ObservabilityPolicy - globalSettings *policies.GlobalSettings - expConditions []conditions.Condition + name string + policy *ngfAPIv1alpha2.ObservabilityPolicy + expConditions []conditions.Condition }{ - { - name: "validation context is nil", - policy: createValidPolicy(), - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - }, - }, - { - name: "validation context is invalid", - policy: createValidPolicy(), - globalSettings: &policies.GlobalSettings{NginxProxyValid: false}, - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - }, - }, - { - name: "telemetry is not enabled", - policy: createValidPolicy(), - globalSettings: &policies.GlobalSettings{NginxProxyValid: true, TelemetryEnabled: false}, - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), - }, - }, { name: "invalid target ref; unsupported group", policy: createModifiedPolicy(func(p *ngfAPIv1alpha2.ObservabilityPolicy) *ngfAPIv1alpha2.ObservabilityPolicy 
{ p.Spec.TargetRefs[0].Group = "Unsupported" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.targetRefs.group: Unsupported value: \"Unsupported\": " + "supported values: \"gateway.networking.k8s.io\""), @@ -106,7 +77,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.TargetRefs[0].Kind = "Unsupported" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.targetRefs.kind: Unsupported value: \"Unsupported\": " + "supported values: \"HTTPRoute\", \"GRPCRoute\""), @@ -118,7 +88,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.Strategy = "invalid" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.strategy: Unsupported value: \"invalid\": " + "supported values: \"ratio\", \"parent\""), @@ -130,7 +99,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.Context = helpers.GetPointer[ngfAPIv1alpha2.TraceContext]("invalid") return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.context: Unsupported value: \"invalid\": " + "supported values: \"extract\", \"inject\", \"propagate\", \"ignore\""), @@ -142,7 +110,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanName = helpers.GetPointer("invalid$$$") return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.spanName: Invalid value: \"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -155,7 +122,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanAttributes[0].Key = "invalid$$$" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.spanAttributes.key: Invalid value: 
\"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -168,7 +134,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanAttributes[0].Value = "invalid$$$" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.spanAttributes.value: Invalid value: \"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -176,10 +141,9 @@ func TestValidator_Validate(t *testing.T) { }, }, { - name: "valid", - policy: createValidPolicy(), - globalSettings: globalSettings, - expConditions: nil, + name: "valid", + policy: createValidPolicy(), + expConditions: nil, }, } @@ -190,7 +154,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, test.globalSettings) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -201,7 +165,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := observability.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -209,6 +173,49 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + globalSettings *policies.GlobalSettings + expConditions []conditions.Condition + }{ + { + name: "global settings are nil", + expConditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), + }, + }, + { + name: "telemetry is not enabled", + globalSettings: &policies.GlobalSettings{TelemetryEnabled: false}, + expConditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + { + name: 
"valid", + globalSettings: &policies.GlobalSettings{ + TelemetryEnabled: true, + }, + expConditions: nil, + }, + } + + v := observability.NewValidator(validation.GenericValidator{}) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conds := v.ValidateGlobalSettings(nil, test.globalSettings) + g.Expect(conds).To(Equal(test.expConditions)) + }) + } +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go b/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go index 43cfb7e87a..598c982837 100644 --- a/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go +++ b/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go @@ -21,11 +21,10 @@ type FakeValidator struct { conflictsReturnsOnCall map[int]struct { result1 bool } - ValidateStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + ValidateStub func(policies.Policy) []conditions.Condition validateMutex sync.RWMutex validateArgsForCall []struct { arg1 policies.Policy - arg2 *policies.GlobalSettings } validateReturns struct { result1 []conditions.Condition @@ -33,6 +32,18 @@ type FakeValidator struct { validateReturnsOnCall map[int]struct { result1 []conditions.Condition } + ValidateGlobalSettingsStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + validateGlobalSettingsMutex sync.RWMutex + validateGlobalSettingsArgsForCall []struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + } + validateGlobalSettingsReturns struct { + result1 []conditions.Condition + } + validateGlobalSettingsReturnsOnCall map[int]struct { + result1 []conditions.Condition + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -99,19 +110,18 @@ func (fake *FakeValidator) ConflictsReturnsOnCall(i int, result1 bool) { }{result1} } -func 
(fake *FakeValidator) Validate(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { +func (fake *FakeValidator) Validate(arg1 policies.Policy) []conditions.Condition { fake.validateMutex.Lock() ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] fake.validateArgsForCall = append(fake.validateArgsForCall, struct { arg1 policies.Policy - arg2 *policies.GlobalSettings - }{arg1, arg2}) + }{arg1}) stub := fake.ValidateStub fakeReturns := fake.validateReturns - fake.recordInvocation("Validate", []interface{}{arg1, arg2}) + fake.recordInvocation("Validate", []interface{}{arg1}) fake.validateMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1) } if specificReturn { return ret.result1 @@ -125,17 +135,17 @@ func (fake *FakeValidator) ValidateCallCount() int { return len(fake.validateArgsForCall) } -func (fake *FakeValidator) ValidateCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { +func (fake *FakeValidator) ValidateCalls(stub func(policies.Policy) []conditions.Condition) { fake.validateMutex.Lock() defer fake.validateMutex.Unlock() fake.ValidateStub = stub } -func (fake *FakeValidator) ValidateArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { +func (fake *FakeValidator) ValidateArgsForCall(i int) policies.Policy { fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() argsForCall := fake.validateArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakeValidator) ValidateReturns(result1 []conditions.Condition) { @@ -161,6 +171,68 @@ func (fake *FakeValidator) ValidateReturnsOnCall(i int, result1 []conditions.Con }{result1} } +func (fake *FakeValidator) ValidateGlobalSettings(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { + fake.validateGlobalSettingsMutex.Lock() + ret, specificReturn := 
fake.validateGlobalSettingsReturnsOnCall[len(fake.validateGlobalSettingsArgsForCall)] + fake.validateGlobalSettingsArgsForCall = append(fake.validateGlobalSettingsArgsForCall, struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + }{arg1, arg2}) + stub := fake.ValidateGlobalSettingsStub + fakeReturns := fake.validateGlobalSettingsReturns + fake.recordInvocation("ValidateGlobalSettings", []interface{}{arg1, arg2}) + fake.validateGlobalSettingsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeValidator) ValidateGlobalSettingsCallCount() int { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + return len(fake.validateGlobalSettingsArgsForCall) +} + +func (fake *FakeValidator) ValidateGlobalSettingsCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = stub +} + +func (fake *FakeValidator) ValidateGlobalSettingsArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + argsForCall := fake.validateGlobalSettingsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeValidator) ValidateGlobalSettingsReturns(result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + fake.validateGlobalSettingsReturns = struct { + result1 []conditions.Condition + }{result1} +} + +func (fake *FakeValidator) ValidateGlobalSettingsReturnsOnCall(i int, result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + if 
fake.validateGlobalSettingsReturnsOnCall == nil { + fake.validateGlobalSettingsReturnsOnCall = make(map[int]struct { + result1 []conditions.Condition + }) + } + fake.validateGlobalSettingsReturnsOnCall[i] = struct { + result1 []conditions.Condition + }{result1} +} + func (fake *FakeValidator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -168,6 +240,8 @@ func (fake *FakeValidator) Invocations() map[string][][]interface{} { defer fake.conflictsMutex.RUnlock() fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/nginx/config/policies/policy.go b/internal/mode/static/nginx/config/policies/policy.go index d65a375db8..93d6054155 100644 --- a/internal/mode/static/nginx/config/policies/policy.go +++ b/internal/mode/static/nginx/config/policies/policy.go @@ -24,8 +24,6 @@ type Policy interface { // GlobalSettings contains global settings from the current state of the graph that may be // needed for policy validation or generation if certain policies rely on those global settings. type GlobalSettings struct { - // NginxProxyValid is whether the NginxProxy resource is valid. - NginxProxyValid bool // TelemetryEnabled is whether telemetry is enabled in the NginxProxy resource. 
TelemetryEnabled bool } diff --git a/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go b/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go index c3c0a1af5b..aaabcbebc9 100644 --- a/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go +++ b/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go @@ -25,7 +25,7 @@ func NewValidator(genericValidator validation.GenericValidator) Validator { } // Validate validates the spec of an UpstreamsSettingsPolicy. -func (v Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { +func (v Validator) Validate(policy policies.Policy) []conditions.Condition { usp := helpers.MustCastObject[*ngfAPI.UpstreamSettingsPolicy](policy) targetRefsPath := field.NewPath("spec").Child("targetRefs") @@ -46,6 +46,14 @@ func (v Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) return nil } +// ValidateGlobalSettings validates an UpstreamSettingsPolicy with respect to the NginxProxy global settings. +func (v Validator) ValidateGlobalSettings( + _ policies.Policy, + _ *policies.GlobalSettings, +) []conditions.Condition { + return nil +} + // Conflicts returns true if the two UpstreamsSettingsPolicies conflict. 
func (v Validator) Conflicts(polA, polB policies.Policy) bool { cspA := helpers.MustCastObject[*ngfAPI.UpstreamSettingsPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go b/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go index e34f4738e0..85699ea297 100644 --- a/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go +++ b/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go @@ -132,7 +132,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, nil) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -143,7 +143,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := upstreamsettings.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -151,6 +151,15 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + v := upstreamsettings.NewValidator(validation.GenericValidator{}) + + g.Expect(v.ValidateGlobalSettings(nil, nil)).To(BeNil()) +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/validator.go b/internal/mode/static/nginx/config/policies/validator.go index 0bb7b58924..e618eeea17 100644 --- a/internal/mode/static/nginx/config/policies/validator.go +++ b/internal/mode/static/nginx/config/policies/validator.go @@ -16,7 +16,9 @@ import ( //counterfeiter:generate . Validator type Validator interface { // Validate validates an NGF Policy. 
- Validate(policy Policy, globalSettings *GlobalSettings) []conditions.Condition + Validate(policy Policy) []conditions.Condition + // ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. + ValidateGlobalSettings(policy Policy, globalSettings *GlobalSettings) []conditions.Condition // Conflicts returns true if the two Policies conflict. Conflicts(a, b Policy) bool } @@ -54,7 +56,7 @@ func NewManager( } // Validate validates the policy. -func (m *CompositeValidator) Validate(policy Policy, globalSettings *GlobalSettings) []conditions.Condition { +func (m *CompositeValidator) Validate(policy Policy) []conditions.Condition { gvk := m.mustExtractGVK(policy) validator, ok := m.validators[gvk] @@ -62,7 +64,22 @@ func (m *CompositeValidator) Validate(policy Policy, globalSettings *GlobalSetti panic(fmt.Sprintf("no validator registered for policy %T", policy)) } - return validator.Validate(policy, globalSettings) + return validator.Validate(policy) +} + +// ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. +func (m *CompositeValidator) ValidateGlobalSettings( + policy Policy, + globalSettings *GlobalSettings, +) []conditions.Condition { + gvk := m.mustExtractGVK(policy) + + validator, ok := m.validators[gvk] + if !ok { + panic(fmt.Sprintf("no validator registered for policy %T", policy)) + } + + return validator.ValidateGlobalSettings(policy, globalSettings) } // Conflicts returns true if the policies conflict. 
diff --git a/internal/mode/static/nginx/config/policies/validator_test.go b/internal/mode/static/nginx/config/policies/validator_test.go index 81b1ee87c8..6787d5360b 100644 --- a/internal/mode/static/nginx/config/policies/validator_test.go +++ b/internal/mode/static/nginx/config/policies/validator_test.go @@ -27,6 +27,13 @@ var _ = Describe("Policy CompositeValidator", func() { }, } + bananaGVK := schema.GroupVersionKind{Group: "fruit", Version: "1", Kind: "banana"} + bananaPolicy := &policiesfakes.FakePolicy{ + GetNameStub: func() string { + return "banana" + }, + } + mustExtractGVK := func(object client.Object) schema.GroupVersionKind { switch object.GetName() { case "apple": @@ -42,34 +49,54 @@ var _ = Describe("Policy CompositeValidator", func() { mustExtractGVK, policies.ManagerConfig{ Validator: &policiesfakes.FakeValidator{ - ValidateStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + ValidateStub: func(_ policies.Policy) []conditions.Condition { return []conditions.Condition{staticConds.NewPolicyInvalid("apple error")} }, + ValidateGlobalSettingsStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + return []conditions.Condition{staticConds.NewPolicyInvalid("apple global settings error")} + }, ConflictsStub: func(_ policies.Policy, _ policies.Policy) bool { return true }, }, GVK: appleGVK, }, policies.ManagerConfig{ Validator: &policiesfakes.FakeValidator{ - ValidateStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + ValidateStub: func(_ policies.Policy) []conditions.Condition { return []conditions.Condition{staticConds.NewPolicyInvalid("orange error")} }, + ValidateGlobalSettingsStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + return []conditions.Condition{staticConds.NewPolicyInvalid("orange global settings error")} + }, ConflictsStub: func(_ policies.Policy, _ policies.Policy) bool { return false }, }, GVK: orangeGVK, }, + 
policies.ManagerConfig{ + Validator: &policiesfakes.FakeValidator{}, + GVK: bananaGVK, + }, ) Context("Validation", func() { When("Policy is registered with manager", func() { It("Validates the policy", func() { - conds := mgr.Validate(applePolicy, nil) + globalSettings := &policies.GlobalSettings{} + + conds := mgr.Validate(applePolicy) Expect(conds).To(HaveLen(1)) Expect(conds[0].Message).To(Equal("apple error")) - conds = mgr.Validate(orangePolicy, nil) + conds = mgr.ValidateGlobalSettings(applePolicy, globalSettings) + Expect(conds).To(HaveLen(1)) + Expect(conds[0].Message).To(Equal("apple global settings error")) + + conds = mgr.Validate(orangePolicy) Expect(conds).To(HaveLen(1)) Expect(conds[0].Message).To(Equal("orange error")) + + conds = mgr.ValidateGlobalSettings(orangePolicy, globalSettings) + Expect(conds).To(HaveLen(1)) + Expect(conds[0].Message).To(Equal("orange global settings error")) }) It("Returns whether the policies conflict", func() { Expect(mgr.Conflicts(applePolicy, applePolicy)).To(BeTrue()) @@ -79,7 +106,7 @@ var _ = Describe("Policy CompositeValidator", func() { When("Policy is not registered with manager", func() { It("Panics on call to validate", func() { validate := func() { - _ = mgr.Validate(&policiesfakes.FakePolicy{}, nil) + _ = mgr.Validate(&policiesfakes.FakePolicy{}) } Expect(validate).To(Panic()) @@ -89,6 +116,13 @@ var _ = Describe("Policy CompositeValidator", func() { _ = mgr.Conflicts(&policiesfakes.FakePolicy{}, &policiesfakes.FakePolicy{}) } + Expect(conflict).To(Panic()) + }) + It("panics on call to conflicts when no validator is registered for policy", func() { + conflict := func() { + _ = mgr.Conflicts(bananaPolicy, bananaPolicy) + } + Expect(conflict).To(Panic()) }) }) diff --git a/internal/mode/static/nginx/config/version.go b/internal/mode/static/nginx/config/version.go deleted file mode 100644 index 20e677e270..0000000000 --- a/internal/mode/static/nginx/config/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package 
config - -import ( - gotemplate "text/template" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" -) - -var versionTemplate = gotemplate.Must(gotemplate.New("version").Parse(versionTemplateText)) - -func executeVersion(conf dataplane.Configuration) []executeResult { - result := executeResult{ - dest: configVersionFile, - data: helpers.MustExecuteTemplate(versionTemplate, conf.Version), - } - - return []executeResult{result} -} diff --git a/internal/mode/static/nginx/config/version_template.go b/internal/mode/static/nginx/config/version_template.go deleted file mode 100644 index ccf46e02cc..0000000000 --- a/internal/mode/static/nginx/config/version_template.go +++ /dev/null @@ -1,12 +0,0 @@ -package config - -const versionTemplateText = ` -server { - listen unix:/var/run/nginx/nginx-config-version.sock; - access_log off; - - location /version { - return 200 {{.}}; - } -} -` diff --git a/internal/mode/static/nginx/config/version_test.go b/internal/mode/static/nginx/config/version_test.go deleted file mode 100644 index ce5913ec95..0000000000 --- a/internal/mode/static/nginx/config/version_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package config - -import ( - "testing" - - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" -) - -func TestExecuteVersion(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - conf := dataplane.Configuration{Version: 42} - res := executeVersion(conf) - g.Expect(res).To(HaveLen(1)) - g.Expect(res[0].dest).To(Equal(configVersionFile)) - g.Expect(string(res[0].data)).To(ContainSubstring("return 200 42;")) -} diff --git a/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go b/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go deleted file mode 100644 index 90e3fe03d2..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go +++ /dev/null @@ -1,191 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package filefakes - -import ( - "os" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" -) - -type FakeClearFoldersOSFileManager struct { - ReadDirStub func(string) ([]os.DirEntry, error) - readDirMutex sync.RWMutex - readDirArgsForCall []struct { - arg1 string - } - readDirReturns struct { - result1 []os.DirEntry - result2 error - } - readDirReturnsOnCall map[int]struct { - result1 []os.DirEntry - result2 error - } - RemoveStub func(string) error - removeMutex sync.RWMutex - removeArgsForCall []struct { - arg1 string - } - removeReturns struct { - result1 error - } - removeReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeClearFoldersOSFileManager) ReadDir(arg1 string) ([]os.DirEntry, error) { - fake.readDirMutex.Lock() - ret, specificReturn := fake.readDirReturnsOnCall[len(fake.readDirArgsForCall)] - fake.readDirArgsForCall = append(fake.readDirArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadDirStub - fakeReturns := fake.readDirReturns - fake.recordInvocation("ReadDir", []interface{}{arg1}) - 
fake.readDirMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirCallCount() int { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - return len(fake.readDirArgsForCall) -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirCalls(stub func(string) ([]os.DirEntry, error)) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = stub -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirArgsForCall(i int) string { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - argsForCall := fake.readDirArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirReturns(result1 []os.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - fake.readDirReturns = struct { - result1 []os.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirReturnsOnCall(i int, result1 []os.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - if fake.readDirReturnsOnCall == nil { - fake.readDirReturnsOnCall = make(map[int]struct { - result1 []os.DirEntry - result2 error - }) - } - fake.readDirReturnsOnCall[i] = struct { - result1 []os.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeClearFoldersOSFileManager) Remove(arg1 string) error { - fake.removeMutex.Lock() - ret, specificReturn := fake.removeReturnsOnCall[len(fake.removeArgsForCall)] - fake.removeArgsForCall = append(fake.removeArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveStub - fakeReturns := fake.removeReturns - fake.recordInvocation("Remove", []interface{}{arg1}) - fake.removeMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return 
ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeClearFoldersOSFileManager) RemoveCallCount() int { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - return len(fake.removeArgsForCall) -} - -func (fake *FakeClearFoldersOSFileManager) RemoveCalls(stub func(string) error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = stub -} - -func (fake *FakeClearFoldersOSFileManager) RemoveArgsForCall(i int) string { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - argsForCall := fake.removeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeClearFoldersOSFileManager) RemoveReturns(result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - fake.removeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeClearFoldersOSFileManager) RemoveReturnsOnCall(i int, result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - if fake.removeReturnsOnCall == nil { - fake.removeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.removeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeClearFoldersOSFileManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeClearFoldersOSFileManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - 
fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ file.ClearFoldersOSFileManager = new(FakeClearFoldersOSFileManager) diff --git a/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go b/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go deleted file mode 100644 index b51ecd7579..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go +++ /dev/null @@ -1,301 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package filefakes - -import ( - "io/fs" - "sync" -) - -type FakeDirEntry struct { - InfoStub func() (fs.FileInfo, error) - infoMutex sync.RWMutex - infoArgsForCall []struct { - } - infoReturns struct { - result1 fs.FileInfo - result2 error - } - infoReturnsOnCall map[int]struct { - result1 fs.FileInfo - result2 error - } - IsDirStub func() bool - isDirMutex sync.RWMutex - isDirArgsForCall []struct { - } - isDirReturns struct { - result1 bool - } - isDirReturnsOnCall map[int]struct { - result1 bool - } - NameStub func() string - nameMutex sync.RWMutex - nameArgsForCall []struct { - } - nameReturns struct { - result1 string - } - nameReturnsOnCall map[int]struct { - result1 string - } - TypeStub func() fs.FileMode - typeMutex sync.RWMutex - typeArgsForCall []struct { - } - typeReturns struct { - result1 fs.FileMode - } - typeReturnsOnCall map[int]struct { - result1 fs.FileMode - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeDirEntry) Info() (fs.FileInfo, error) { - fake.infoMutex.Lock() - ret, specificReturn := fake.infoReturnsOnCall[len(fake.infoArgsForCall)] - fake.infoArgsForCall = append(fake.infoArgsForCall, struct { - }{}) - stub := fake.InfoStub - fakeReturns := fake.infoReturns - fake.recordInvocation("Info", []interface{}{}) - fake.infoMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeDirEntry) 
InfoCallCount() int { - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - return len(fake.infoArgsForCall) -} - -func (fake *FakeDirEntry) InfoCalls(stub func() (fs.FileInfo, error)) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = stub -} - -func (fake *FakeDirEntry) InfoReturns(result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - fake.infoReturns = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) InfoReturnsOnCall(i int, result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - if fake.infoReturnsOnCall == nil { - fake.infoReturnsOnCall = make(map[int]struct { - result1 fs.FileInfo - result2 error - }) - } - fake.infoReturnsOnCall[i] = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) IsDir() bool { - fake.isDirMutex.Lock() - ret, specificReturn := fake.isDirReturnsOnCall[len(fake.isDirArgsForCall)] - fake.isDirArgsForCall = append(fake.isDirArgsForCall, struct { - }{}) - stub := fake.IsDirStub - fakeReturns := fake.isDirReturns - fake.recordInvocation("IsDir", []interface{}{}) - fake.isDirMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) IsDirCallCount() int { - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - return len(fake.isDirArgsForCall) -} - -func (fake *FakeDirEntry) IsDirCalls(stub func() bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = stub -} - -func (fake *FakeDirEntry) IsDirReturns(result1 bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = nil - fake.isDirReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) IsDirReturnsOnCall(i int, result1 bool) { - fake.isDirMutex.Lock() - 
defer fake.isDirMutex.Unlock() - fake.IsDirStub = nil - if fake.isDirReturnsOnCall == nil { - fake.isDirReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isDirReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) Name() string { - fake.nameMutex.Lock() - ret, specificReturn := fake.nameReturnsOnCall[len(fake.nameArgsForCall)] - fake.nameArgsForCall = append(fake.nameArgsForCall, struct { - }{}) - stub := fake.NameStub - fakeReturns := fake.nameReturns - fake.recordInvocation("Name", []interface{}{}) - fake.nameMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) NameCallCount() int { - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - return len(fake.nameArgsForCall) -} - -func (fake *FakeDirEntry) NameCalls(stub func() string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = stub -} - -func (fake *FakeDirEntry) NameReturns(result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - fake.nameReturns = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) NameReturnsOnCall(i int, result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - if fake.nameReturnsOnCall == nil { - fake.nameReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.nameReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) Type() fs.FileMode { - fake.typeMutex.Lock() - ret, specificReturn := fake.typeReturnsOnCall[len(fake.typeArgsForCall)] - fake.typeArgsForCall = append(fake.typeArgsForCall, struct { - }{}) - stub := fake.TypeStub - fakeReturns := fake.typeReturns - fake.recordInvocation("Type", []interface{}{}) - fake.typeMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - 
-func (fake *FakeDirEntry) TypeCallCount() int { - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - return len(fake.typeArgsForCall) -} - -func (fake *FakeDirEntry) TypeCalls(stub func() fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = stub -} - -func (fake *FakeDirEntry) TypeReturns(result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - fake.typeReturns = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) TypeReturnsOnCall(i int, result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - if fake.typeReturnsOnCall == nil { - fake.typeReturnsOnCall = make(map[int]struct { - result1 fs.FileMode - }) - } - fake.typeReturnsOnCall[i] = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeDirEntry) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ fs.DirEntry = new(FakeDirEntry) diff --git a/internal/mode/static/nginx/file/filefakes/fake_manager.go b/internal/mode/static/nginx/file/filefakes/fake_manager.go deleted file mode 100644 index 
52b34e8e72..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_manager.go +++ /dev/null @@ -1,116 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package filefakes - -import ( - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" -) - -type FakeManager struct { - ReplaceFilesStub func([]file.File) error - replaceFilesMutex sync.RWMutex - replaceFilesArgsForCall []struct { - arg1 []file.File - } - replaceFilesReturns struct { - result1 error - } - replaceFilesReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) ReplaceFiles(arg1 []file.File) error { - var arg1Copy []file.File - if arg1 != nil { - arg1Copy = make([]file.File, len(arg1)) - copy(arg1Copy, arg1) - } - fake.replaceFilesMutex.Lock() - ret, specificReturn := fake.replaceFilesReturnsOnCall[len(fake.replaceFilesArgsForCall)] - fake.replaceFilesArgsForCall = append(fake.replaceFilesArgsForCall, struct { - arg1 []file.File - }{arg1Copy}) - stub := fake.ReplaceFilesStub - fakeReturns := fake.replaceFilesReturns - fake.recordInvocation("ReplaceFiles", []interface{}{arg1Copy}) - fake.replaceFilesMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) ReplaceFilesCallCount() int { - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - return len(fake.replaceFilesArgsForCall) -} - -func (fake *FakeManager) ReplaceFilesCalls(stub func([]file.File) error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = stub -} - -func (fake *FakeManager) ReplaceFilesArgsForCall(i int) []file.File { - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - argsForCall := fake.replaceFilesArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeManager) 
ReplaceFilesReturns(result1 error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = nil - fake.replaceFilesReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) ReplaceFilesReturnsOnCall(i int, result1 error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = nil - if fake.replaceFilesReturnsOnCall == nil { - fake.replaceFilesReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.replaceFilesReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ file.Manager = new(FakeManager) diff --git a/internal/mode/static/nginx/file/folders.go b/internal/mode/static/nginx/file/folders.go deleted file mode 100644 index 847ca6312a..0000000000 --- a/internal/mode/static/nginx/file/folders.go +++ /dev/null @@ -1,56 +0,0 @@ -package file - -import ( - "fmt" - "os" - "path/filepath" - "slices" -) - -//counterfeiter:generate io/fs.DirEntry - -//counterfeiter:generate . ClearFoldersOSFileManager - -// ClearFoldersOSFileManager is an interface that exposes File I/O operations for ClearFolders. -// Used for unit testing. 
-type ClearFoldersOSFileManager interface { - // ReadDir returns the directory entries for the directory. - ReadDir(dirname string) ([]os.DirEntry, error) - // Remove removes the file with given name. - Remove(name string) error -} - -// These files are needed on startup, so skip deleting them. -const ( - mainConf = "/etc/nginx/main-includes/main.conf" - mgmtConf = "/etc/nginx/main-includes/mgmt.conf" - deployCtx = "/etc/nginx/main-includes/deployment_ctx.json" -) - -var ignoreFilePaths = []string{mainConf, mgmtConf, deployCtx} - -// ClearFolders removes all files in the given folders and returns the removed files' full paths. -func ClearFolders(fileMgr ClearFoldersOSFileManager, paths []string) (removedFiles []string, e error) { - for _, path := range paths { - entries, err := fileMgr.ReadDir(path) - if err != nil { - return removedFiles, fmt.Errorf("failed to read directory %q: %w", path, err) - } - - for _, entry := range entries { - entryPath := filepath.Join(path, entry.Name()) - - if slices.Contains(ignoreFilePaths, entryPath) { - continue - } - - if err := fileMgr.Remove(entryPath); err != nil { - return removedFiles, fmt.Errorf("failed to remove %q: %w", entryPath, err) - } - - removedFiles = append(removedFiles, entryPath) - } - } - - return removedFiles, nil -} diff --git a/internal/mode/static/nginx/file/folders_test.go b/internal/mode/static/nginx/file/folders_test.go deleted file mode 100644 index 8ae8fe1808..0000000000 --- a/internal/mode/static/nginx/file/folders_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package file_test - -import ( - "errors" - "os" - "path/filepath" - "testing" - - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" -) - -func writeFile(t *testing.T, name string, data []byte) { - t.Helper() - g := NewWithT(t) - - //nolint:gosec // the file permission is ok for unit testing - g.Expect(os.WriteFile(name, data, 0o644)).To(Succeed()) -} - -func TestClearFoldersRemoves(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - tempDir := t.TempDir() - - path1 := filepath.Join(tempDir, "path1") - writeFile(t, path1, []byte("test")) - path2 := filepath.Join(tempDir, "path2") - writeFile(t, path2, []byte("test")) - - removedFiles, err := file.ClearFolders(file.NewStdLibOSFileManager(), []string{tempDir}) - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(removedFiles).To(ConsistOf(path1, path2)) - - entries, err := os.ReadDir(tempDir) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(entries).To(BeEmpty()) -} - -func TestClearFoldersIgnoresPaths(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - fakeFileMgr := &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return []os.DirEntry{ - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "deployment_ctx.json" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "mgmt.conf" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "main.conf" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "can-be-removed.conf" - }, - }, - }, nil - }, - } - - removed, err := file.ClearFolders(fakeFileMgr, []string{"/etc/nginx/main-includes"}) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(removed).To(HaveLen(1)) - g.Expect(removed[0]).To(Equal("/etc/nginx/main-includes/can-be-removed.conf")) -} - -func TestClearFoldersFails(t *testing.T) { - t.Parallel() - files := []string{"file"} - - testErr := errors.New("test error") - - tests := []struct { - fileMgr 
*filefakes.FakeClearFoldersOSFileManager - name string - }{ - { - fileMgr: &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return nil, testErr - }, - }, - name: "ReadDir fails", - }, - { - fileMgr: &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return []os.DirEntry{ - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "file" - }, - }, - }, nil - }, - RemoveStub: func(_ string) error { - return testErr - }, - }, - name: "Remove fails", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - removedFiles, err := file.ClearFolders(test.fileMgr, files) - - g.Expect(err).To(MatchError(testErr)) - g.Expect(removedFiles).To(BeNil()) - }) - } -} diff --git a/internal/mode/static/nginx/file/manager.go b/internal/mode/static/nginx/file/manager.go deleted file mode 100644 index 52c64216f1..0000000000 --- a/internal/mode/static/nginx/file/manager.go +++ /dev/null @@ -1,176 +0,0 @@ -package file - -import ( - "errors" - "fmt" - "io" - "io/fs" - "os" - - "github.com/go-logr/logr" -) - -//go:generate go tool counterfeiter -generate - -const ( - // regularFileMode defines the default file mode for regular files. - regularFileMode = 0o644 - // secretFileMode defines the default file mode for files with secrets. - secretFileMode = 0o640 -) - -// Type is the type of File. -type Type int - -func (t Type) String() string { - switch t { - case TypeRegular: - return "Regular" - case TypeSecret: - return "Secret" - default: - return fmt.Sprintf("Unknown Type %d", t) - } -} - -const ( - // TypeRegular is the type for regular configuration files. - TypeRegular Type = iota - // TypeSecret is the type for secret files. - TypeSecret -) - -// File is a file that is part of NGINX configuration to be written to the file system. -type File struct { - Path string - Content []byte - Type Type -} - -//counterfeiter:generate . 
OSFileManager - -// OSFileManager is an interface that exposes File I/O operations for ManagerImpl. -// Used for unit testing. -type OSFileManager interface { - // ReadDir returns the directory entries for the directory. - ReadDir(dirname string) ([]fs.DirEntry, error) - // Remove file with given name. - Remove(name string) error - // Create file at the provided filepath. - Create(name string) (*os.File, error) - // Chmod sets the mode of the file. - Chmod(file *os.File, mode os.FileMode) error - // Write writes contents to the file. - Write(file *os.File, contents []byte) error - // Open opens the file. - Open(name string) (*os.File, error) - // Copy copies from src to dst. - Copy(dst io.Writer, src io.Reader) error -} - -//counterfeiter:generate . Manager - -// Manager manages NGINX configuration files. -type Manager interface { - // ReplaceFiles replaces the files on the file system with the given files removing any previous files. - ReplaceFiles(files []File) error -} - -// ManagerImpl is an implementation of Manager. -// Note: It is not thread safe. -type ManagerImpl struct { - logger logr.Logger - osFileManager OSFileManager - lastWrittenPaths []string -} - -// NewManagerImpl creates a new NewManagerImpl. -func NewManagerImpl(logger logr.Logger, osFileManager OSFileManager) *ManagerImpl { - return &ManagerImpl{ - logger: logger, - osFileManager: osFileManager, - } -} - -// ReplaceFiles replaces the files on the file system with the given files removing any previous files. -// It panics if a file type is unknown. 
-func (m *ManagerImpl) ReplaceFiles(files []File) error { - for _, path := range m.lastWrittenPaths { - if err := m.osFileManager.Remove(path); err != nil { - if os.IsNotExist(err) { - m.logger.Info( - "File not found when attempting to delete", - "path", path, - "error", err, - ) - continue - } - return fmt.Errorf("failed to delete file %q: %w", path, err) - } - - m.logger.V(1).Info("Deleted file", "path", path) - } - - // In some cases, NGINX reads files in runtime, like a JWK. If you remove such file, NGINX will fail - // any request (return 500 status code) that involves reading the file. - // However, we don't have such files yet, so we're not considering this case. - - m.lastWrittenPaths = make([]string, 0, len(files)) - - for _, file := range files { - if err := WriteFile(m.osFileManager, file); err != nil { - return fmt.Errorf("failed to write file %q of type %v: %w", file.Path, file.Type, err) - } - - m.lastWrittenPaths = append(m.lastWrittenPaths, file.Path) - m.logger.V(1).Info("Wrote file", "path", file.Path) - } - - return nil -} - -func WriteFile(fileMgr OSFileManager, file File) error { - ensureType(file.Type) - - f, err := fileMgr.Create(file.Path) - if err != nil { - return fmt.Errorf("failed to create file %q: %w", file.Path, err) - } - - var resultErr error - - defer func() { - if err := f.Close(); err != nil { - resultErr = errors.Join(resultErr, fmt.Errorf("failed to close file %q: %w", file.Path, err)) - } - }() - - switch file.Type { - case TypeRegular: - if err := fileMgr.Chmod(f, regularFileMode); err != nil { - resultErr = fmt.Errorf( - "failed to set file mode to %#o for %q: %w", regularFileMode, file.Path, err) - return resultErr - } - case TypeSecret: - if err := fileMgr.Chmod(f, secretFileMode); err != nil { - resultErr = fmt.Errorf("failed to set file mode to %#o for %q: %w", secretFileMode, file.Path, err) - return resultErr - } - default: - panic(fmt.Sprintf("unknown file type %d", file.Type)) - } - - if err := fileMgr.Write(f, 
file.Content); err != nil { - resultErr = fmt.Errorf("failed to write file %q: %w", file.Path, err) - return resultErr - } - - return resultErr -} - -func ensureType(fileType Type) { - if fileType != TypeRegular && fileType != TypeSecret { - panic(fmt.Sprintf("unknown file type %d", fileType)) - } -} diff --git a/internal/mode/static/nginx/file/manager_test.go b/internal/mode/static/nginx/file/manager_test.go deleted file mode 100644 index 114b81c3dc..0000000000 --- a/internal/mode/static/nginx/file/manager_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package file_test - -import ( - "errors" - "os" - "path/filepath" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" -) - -var _ = Describe("EventHandler", func() { - Describe("Replace files", Ordered, func() { - var ( - mgr *file.ManagerImpl - tmpDir string - regular1, regular2, regular3, secret file.File - ) - - ensureFiles := func(files []file.File) { - entries, err := os.ReadDir(tmpDir) - Expect(err).ToNot(HaveOccurred()) - Expect(entries).Should(HaveLen(len(files))) - - entriesMap := make(map[string]os.DirEntry) - for _, entry := range entries { - entriesMap[entry.Name()] = entry - } - - for _, f := range files { - _, ok := entriesMap[filepath.Base(f.Path)] - Expect(ok).Should(BeTrue()) - - info, err := os.Stat(f.Path) - Expect(err).ToNot(HaveOccurred()) - - Expect(info.IsDir()).To(BeFalse()) - - if f.Type == file.TypeRegular { - Expect(info.Mode()).To(Equal(os.FileMode(0o644))) - } else { - Expect(info.Mode()).To(Equal(os.FileMode(0o640))) - } - - bytes, err := os.ReadFile(f.Path) - Expect(err).ToNot(HaveOccurred()) - Expect(bytes).To(Equal(f.Content)) - } - } - - ensureNotExist := func(files ...file.File) { - for _, f := range files { - _, err := os.Stat(f.Path) - Expect(os.IsNotExist(err)).To(BeTrue()) - } - } - - 
BeforeAll(func() { - mgr = file.NewManagerImpl(logr.Discard(), file.NewStdLibOSFileManager()) - tmpDir = GinkgoT().TempDir() - - regular1 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-1.conf"), - Content: []byte("regular-1"), - } - regular2 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-2.conf"), - Content: []byte("regular-2"), - } - regular3 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-3.conf"), - Content: []byte("regular-3"), - } - secret = file.File{ - Type: file.TypeSecret, - Path: filepath.Join(tmpDir, "secret.conf"), - Content: []byte("secret"), - } - }) - - It("should write initial config", func() { - files := []file.File{regular1, regular2, secret} - - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - - ensureFiles(files) - }) - - It("should write subsequent config", func() { - files := []file.File{ - regular2, // overwriting - regular3, // adding - secret, // overwriting - } - - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - - ensureFiles(files) - ensureNotExist(regular1) - }) - - It("should remove all files", func() { - err := mgr.ReplaceFiles(nil) - Expect(err).ToNot(HaveOccurred()) - - ensureNotExist(regular2, regular3, secret) - }) - }) - - When("file does not exist", func() { - It("should not error", func() { - fakeOSMgr := &filefakes.FakeOSFileManager{} - mgr := file.NewManagerImpl(logr.Discard(), fakeOSMgr) - - files := []file.File{ - { - Type: file.TypeRegular, - Path: "regular-1.conf", - Content: []byte("regular-1"), - }, - } - - Expect(mgr.ReplaceFiles(files)).ToNot(HaveOccurred()) - - fakeOSMgr.RemoveReturns(os.ErrNotExist) - Expect(mgr.ReplaceFiles(files)).ToNot(HaveOccurred()) - }) - }) - - When("file type is not supported", func() { - It("should panic", func() { - mgr := file.NewManagerImpl(logr.Discard(), nil) - - files := []file.File{ - { - Type: 123, - Path: "unsupported.conf", - }, - } - - replace := 
func() { - _ = mgr.ReplaceFiles(files) - } - - Expect(replace).Should(Panic()) - }) - }) - - Describe("Edge cases with IO errors", func() { - var ( - files = []file.File{ - { - Type: file.TypeRegular, - Path: "regular.conf", - Content: []byte("regular"), - }, - { - Type: file.TypeSecret, - Path: "secret.conf", - Content: []byte("secret"), - }, - } - errTest = errors.New("test error") - ) - - DescribeTable( - "should return error on file IO error", - func(fakeOSMgr *filefakes.FakeOSFileManager) { - mgr := file.NewManagerImpl(logr.Discard(), fakeOSMgr) - - // special case for Remove - // to kick off removing, we need to successfully write files beforehand - if fakeOSMgr.RemoveStub != nil { - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - } - - err := mgr.ReplaceFiles(files) - Expect(err).Should(HaveOccurred()) - Expect(err).To(MatchError(errTest)) - }, - Entry( - "Remove", - &filefakes.FakeOSFileManager{ - RemoveStub: func(_ string) error { - return errTest - }, - }, - ), - Entry( - "Create", - &filefakes.FakeOSFileManager{ - CreateStub: func(_ string) (*os.File, error) { - return nil, errTest - }, - }, - ), - Entry( - "Chmod", - &filefakes.FakeOSFileManager{ - ChmodStub: func(_ *os.File, _ os.FileMode) error { - return errTest - }, - }, - ), - Entry( - "Write", - &filefakes.FakeOSFileManager{ - WriteStub: func(_ *os.File, _ []byte) error { - return errTest - }, - }, - ), - ) - }) -}) diff --git a/internal/mode/static/nginx/runtime/clients.go b/internal/mode/static/nginx/runtime/clients.go deleted file mode 100644 index a01a8ef09f..0000000000 --- a/internal/mode/static/nginx/runtime/clients.go +++ /dev/null @@ -1,39 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net" - "net/http" - - "github.com/nginxinc/nginx-plus-go-client/client" -) - -const ( - nginxPlusAPISock = "/var/run/nginx/nginx-plus-api.sock" - nginxPlusAPIURI = "http://nginx-plus-api/api" -) - -// CreatePlusClient returns a client for communicating with the NGINX Plus 
API. -func CreatePlusClient() (*client.NginxClient, error) { - var plusClient *client.NginxClient - var err error - - httpClient := GetSocketClient(nginxPlusAPISock) - plusClient, err = client.NewNginxClient(nginxPlusAPIURI, client.WithHTTPClient(&httpClient)) - if err != nil { - return nil, fmt.Errorf("failed to create NginxClient for Plus: %w", err) - } - return plusClient, nil -} - -// GetSocketClient gets an http.Client with a unix socket transport. -func GetSocketClient(sockPath string) http.Client { - return http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", sockPath) - }, - }, - } -} diff --git a/internal/mode/static/nginx/runtime/manager.go b/internal/mode/static/nginx/runtime/manager.go deleted file mode 100644 index 8c378f1e99..0000000000 --- a/internal/mode/static/nginx/runtime/manager.go +++ /dev/null @@ -1,284 +0,0 @@ -package runtime - -import ( - "context" - "errors" - "fmt" - "io/fs" - "os" - "strconv" - "strings" - "syscall" - "time" - - "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - "k8s.io/apimachinery/pkg/util/wait" -) - -//go:generate go tool counterfeiter -generate - -const ( - // PidFile specifies the location of the PID file for the Nginx process. - PidFile = "/var/run/nginx/nginx.pid" - // PidFileTimeout defines the timeout duration for accessing the PID file. - PidFileTimeout = 10000 * time.Millisecond - // NginxReloadTimeout sets the timeout duration for reloading the Nginx configuration. - NginxReloadTimeout = 60000 * time.Millisecond -) - -type ( - ReadFileFunc func(string) ([]byte, error) - CheckFileFunc func(string) (fs.FileInfo, error) -) - -var childProcPathFmt = "/proc/%[1]v/task/%[1]v/children" - -//counterfeiter:generate . 
NginxPlusClient - -type NginxPlusClient interface { - UpdateHTTPServers( - upstream string, - servers []ngxclient.UpstreamServer, - ) ( - added []ngxclient.UpstreamServer, - deleted []ngxclient.UpstreamServer, - updated []ngxclient.UpstreamServer, - err error, - ) - GetUpstreams() (*ngxclient.Upstreams, error) - UpdateStreamServers( - upstream string, - servers []ngxclient.StreamUpstreamServer, - ) ( - added []ngxclient.StreamUpstreamServer, - deleted []ngxclient.StreamUpstreamServer, - updated []ngxclient.StreamUpstreamServer, - err error, - ) - GetStreamUpstreams() (*ngxclient.StreamUpstreams, error) -} - -//counterfeiter:generate . Manager - -// Manager manages the runtime of NGINX. -type Manager interface { - // Reload reloads NGINX configuration. It is a blocking operation. - Reload(ctx context.Context, configVersion int) error - // IsPlus returns whether or not we are running NGINX plus. - IsPlus() bool - // GetUpstreams uses the NGINX Plus API to get the upstreams. - // Only usable if running NGINX Plus. - GetUpstreams() (ngxclient.Upstreams, ngxclient.StreamUpstreams, error) - // UpdateHTTPServers uses the NGINX Plus API to update HTTP upstream servers. - // Only usable if running NGINX Plus. - UpdateHTTPServers(string, []ngxclient.UpstreamServer) error - // UpdateStreamServers uses the NGINX Plus API to update stream upstream servers. - // Only usable if running NGINX Plus. - UpdateStreamServers(string, []ngxclient.StreamUpstreamServer) error -} - -// MetricsCollector is an interface for the metrics of the NGINX runtime manager. -// -//counterfeiter:generate . MetricsCollector -type MetricsCollector interface { - IncReloadCount() - IncReloadErrors() - ObserveLastReloadTime(ms time.Duration) -} - -// ManagerImpl implements Manager. 
-type ManagerImpl struct { - processHandler ProcessHandler - metricsCollector MetricsCollector - verifyClient nginxConfigVerifier - ngxPlusClient NginxPlusClient - logger logr.Logger -} - -// NewManagerImpl creates a new ManagerImpl. -func NewManagerImpl( - ngxPlusClient NginxPlusClient, - collector MetricsCollector, - logger logr.Logger, - processHandler ProcessHandler, - verifyClient nginxConfigVerifier, -) *ManagerImpl { - return &ManagerImpl{ - processHandler: processHandler, - metricsCollector: collector, - verifyClient: verifyClient, - ngxPlusClient: ngxPlusClient, - logger: logger, - } -} - -// IsPlus returns whether or not we are running NGINX plus. -func (m *ManagerImpl) IsPlus() bool { - return m.ngxPlusClient != nil -} - -func (m *ManagerImpl) Reload(ctx context.Context, configVersion int) error { - start := time.Now() - // We find the main NGINX PID on every reload because it will change if the NGINX container is restarted. - pid, err := m.processHandler.FindMainProcess(ctx, PidFileTimeout) - if err != nil { - return fmt.Errorf("failed to find NGINX main process: %w", err) - } - - childProcFile := fmt.Sprintf(childProcPathFmt, pid) - previousChildProcesses, err := m.processHandler.ReadFile(childProcFile) - if err != nil { - return err - } - - // send HUP signal to the NGINX main process reload configuration - // See https://nginx.org/en/docs/control.html - if errP := m.processHandler.Kill(pid); errP != nil { - m.metricsCollector.IncReloadErrors() - return fmt.Errorf("failed to send the HUP signal to NGINX main: %w", errP) - } - - if err = m.verifyClient.WaitForCorrectVersion( - ctx, - configVersion, - childProcFile, - previousChildProcesses, - os.ReadFile, - ); err != nil { - m.metricsCollector.IncReloadErrors() - return err - } - m.metricsCollector.IncReloadCount() - - finish := time.Now() - m.metricsCollector.ObserveLastReloadTime(finish.Sub(start)) - return nil -} - -// GetUpstreams uses the NGINX Plus API to get the upstreams. 
-// Only usable if running NGINX Plus. -func (m *ManagerImpl) GetUpstreams() (ngxclient.Upstreams, ngxclient.StreamUpstreams, error) { - if !m.IsPlus() { - panic("cannot get upstream servers: NGINX Plus not enabled") - } - - upstreams, err := m.ngxPlusClient.GetUpstreams() - if err != nil { - return nil, nil, err - } - - if upstreams == nil { - return nil, nil, errors.New("GET upstreams returned nil value") - } - - streamUpstreams, err := m.ngxPlusClient.GetStreamUpstreams() - if err != nil { - return nil, nil, err - } - - if streamUpstreams == nil { - return nil, nil, errors.New("GET stream upstreams returned nil value") - } - - return *upstreams, *streamUpstreams, nil -} - -// UpdateHTTPServers uses the NGINX Plus API to update HTTP upstream servers. -// Only usable if running NGINX Plus. -func (m *ManagerImpl) UpdateHTTPServers(upstream string, servers []ngxclient.UpstreamServer) error { - if !m.IsPlus() { - panic("cannot update HTTP upstream servers: NGINX Plus not enabled") - } - - added, deleted, updated, err := m.ngxPlusClient.UpdateHTTPServers(upstream, servers) - m.logger.V(1).Info("Added upstream servers", "count", len(added)) - m.logger.V(1).Info("Deleted upstream servers", "count", len(deleted)) - m.logger.V(1).Info("Updated upstream servers", "count", len(updated)) - - return err -} - -// UpdateStreamServers uses the NGINX Plus API to update stream upstream servers. -// Only usable if running NGINX Plus. 
-func (m *ManagerImpl) UpdateStreamServers(upstream string, servers []ngxclient.StreamUpstreamServer) error { - if !m.IsPlus() { - panic("cannot update stream upstream servers: NGINX Plus not enabled") - } - - added, deleted, updated, err := m.ngxPlusClient.UpdateStreamServers(upstream, servers) - m.logger.V(1).Info("Added stream upstream servers", "count", len(added)) - m.logger.V(1).Info("Deleted stream upstream servers", "count", len(deleted)) - m.logger.V(1).Info("Updated stream upstream servers", "count", len(updated)) - - return err -} - -//counterfeiter:generate . ProcessHandler - -type ProcessHandler interface { - FindMainProcess( - ctx context.Context, - timeout time.Duration, - ) (int, error) - ReadFile(file string) ([]byte, error) - Kill(pid int) error -} - -type ProcessHandlerImpl struct { - readFile ReadFileFunc - checkFile CheckFileFunc -} - -func NewProcessHandlerImpl(readFile ReadFileFunc, checkFile CheckFileFunc) *ProcessHandlerImpl { - return &ProcessHandlerImpl{ - readFile: readFile, - checkFile: checkFile, - } -} - -func (p *ProcessHandlerImpl) FindMainProcess( - ctx context.Context, - timeout time.Duration, -) (int, error) { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - err := wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - _, err := p.checkFile(PidFile) - if err == nil { - return true, nil - } - if !errors.Is(err, fs.ErrNotExist) { - return false, err - } - return false, nil - }) - if err != nil { - return 0, err - } - - content, err := p.readFile(PidFile) - if err != nil { - return 0, err - } - - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("invalid pid file content %q: %w", content, err) - } - - return pid, nil -} - -func (p *ProcessHandlerImpl) ReadFile(file string) ([]byte, error) { - return p.readFile(file) -} - -func (p *ProcessHandlerImpl) Kill(pid int) error { - return 
syscall.Kill(pid, syscall.SIGHUP) -} diff --git a/internal/mode/static/nginx/runtime/manager_test.go b/internal/mode/static/nginx/runtime/manager_test.go deleted file mode 100644 index 1c40c03513..0000000000 --- a/internal/mode/static/nginx/runtime/manager_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package runtime_test - -import ( - "context" - "errors" - "fmt" - "io/fs" - "testing" - "time" - - "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime/runtimefakes" -) - -var _ = Describe("NGINX Runtime Manager", func() { - It("returns whether or not we're using NGINX Plus", func() { - mgr := runtime.NewManagerImpl(nil, nil, logr.Discard(), nil, nil) - Expect(mgr.IsPlus()).To(BeFalse()) - - mgr = runtime.NewManagerImpl(&ngxclient.NginxClient{}, nil, logr.Discard(), nil, nil) - Expect(mgr.IsPlus()).To(BeTrue()) - }) - - var ( - err error - manager runtime.Manager - upstreamServers []ngxclient.UpstreamServer - streamUpstreamServers []ngxclient.StreamUpstreamServer - ngxPlusClient *runtimefakes.FakeNginxPlusClient - process *runtimefakes.FakeProcessHandler - - metrics *runtimefakes.FakeMetricsCollector - verifyClient *runtimefakes.FakeVerifyClient - ) - - BeforeEach(func() { - upstreamServers = []ngxclient.UpstreamServer{ - {}, - } - streamUpstreamServers = []ngxclient.StreamUpstreamServer{ - {}, - } - }) - - Context("Reload", func() { - BeforeEach(func() { - ngxPlusClient = &runtimefakes.FakeNginxPlusClient{} - process = &runtimefakes.FakeProcessHandler{} - metrics = &runtimefakes.FakeMetricsCollector{} - verifyClient = &runtimefakes.FakeVerifyClient{} - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - }) - - It("Is successful", func() { - 
Expect(manager.Reload(context.Background(), 1)).To(Succeed()) - - Expect(process.FindMainProcessCallCount()).To(Equal(1)) - Expect(process.ReadFileCallCount()).To(Equal(1)) - Expect(process.KillCallCount()).To(Equal(1)) - Expect(metrics.IncReloadCountCallCount()).To(Equal(1)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(1)) - Expect(metrics.ObserveLastReloadTimeCallCount()).To(Equal(1)) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(0)) - }) - - It("Fails to find the main process", func() { - process.FindMainProcessReturns(0, fmt.Errorf("failed to find process")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to find NGINX main process: failed to find process")) - Expect(process.ReadFileCallCount()).To(Equal(0)) - Expect(process.KillCallCount()).To(Equal(0)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("Fails to read file", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns(nil, fmt.Errorf("failed to read file")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to read file")) - Expect(process.KillCallCount()).To(Equal(0)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("Fails to send kill signal", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns([]byte("child1\nchild2"), nil) - process.KillReturns(fmt.Errorf("failed to send kill signal")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to send the HUP signal to NGINX main: failed to send kill signal")) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(1)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("times out waiting for correct version", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns([]byte("child1\nchild2"), nil) - process.KillReturns(nil) - 
verifyClient.WaitForCorrectVersionReturns(fmt.Errorf("timeout waiting for correct version")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("timeout waiting for correct version")) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(1)) - }) - - When("MetricsCollector is nil", func() { - It("panics", func() { - metrics = nil - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - - reload := func() { - err = manager.Reload(context.Background(), 0) - } - - Expect(reload).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - When("VerifyClient is nil", func() { - It("panics", func() { - metrics = &runtimefakes.FakeMetricsCollector{} - verifyClient = nil - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - - reload := func() { - err = manager.Reload(context.Background(), 0) - } - - Expect(reload).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) - }) - - When("running NGINX plus", func() { - BeforeEach(func() { - ngxPlusClient = &runtimefakes.FakeNginxPlusClient{} - manager = runtime.NewManagerImpl(ngxPlusClient, nil, logr.Discard(), nil, nil) - }) - - It("successfully updates HTTP server upstream", func() { - Expect(manager.UpdateHTTPServers("test", upstreamServers)).To(Succeed()) - }) - - It("successfully updates stream server upstream", func() { - Expect(manager.UpdateStreamServers("test", streamUpstreamServers)).To(Succeed()) - }) - - It("returns no upstreams from NGINX Plus API when upstreams are nil", func() { - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(upstreams).To(BeEmpty()) - Expect(streamUpstreams).To(BeEmpty()) - }) - - It("successfully returns server upstreams", func() { - expUpstreams := ngxclient.Upstreams{ - "upstream1": { - Zone: "zone1", - Peers: []ngxclient.Peer{ - {ID: 1, Name: "peer1-name"}, - }, - Queue: ngxclient.Queue{Size: 10}, - Zombies: 2, - 
}, - "upstream2": { - Zone: "zone2", - Peers: []ngxclient.Peer{ - {ID: 2, Name: "peer2-name"}, - }, - Queue: ngxclient.Queue{Size: 20}, - Zombies: 1, - }, - } - - expStreamUpstreams := ngxclient.StreamUpstreams{ - "upstream1": { - Zone: "zone1", - Peers: []ngxclient.StreamPeer{ - {ID: 1, Name: "peer1-name"}, - }, - Zombies: 2, - }, - "upstream2": { - Zone: "zone2", - Peers: []ngxclient.StreamPeer{ - {ID: 2, Name: "peer2-name"}, - }, - Zombies: 1, - }, - } - - ngxPlusClient.GetUpstreamsReturns(&expUpstreams, nil) - ngxPlusClient.GetStreamUpstreamsReturns(&expStreamUpstreams, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).NotTo(HaveOccurred()) - Expect(expUpstreams).To(Equal(upstreams)) - Expect(expStreamUpstreams).To(Equal(streamUpstreams)) - }) - - It("returns an error when GetUpstreams fails", func() { - ngxPlusClient.GetUpstreamsReturns(nil, errors.New("failed to get upstreams")) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("failed to get upstreams")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when GetUpstreams returns nil", func() { - ngxPlusClient.GetUpstreamsReturns(nil, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("GET upstreams returned nil value")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when GetStreamUpstreams fails", func() { - ngxPlusClient.GetUpstreamsReturns(&ngxclient.Upstreams{}, nil) - ngxPlusClient.GetStreamUpstreamsReturns(nil, errors.New("failed to get upstreams")) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("failed to get upstreams")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when 
GetStreamUpstreams returns nil", func() { - ngxPlusClient.GetUpstreamsReturns(&ngxclient.Upstreams{}, nil) - ngxPlusClient.GetStreamUpstreamsReturns(nil, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("GET stream upstreams returned nil value")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - }) - - When("not running NGINX plus", func() { - BeforeEach(func() { - ngxPlusClient = nil - manager = runtime.NewManagerImpl(ngxPlusClient, nil, logr.Discard(), nil, nil) - }) - - It("should panic when fetching upstream servers", func() { - upstreams := func() { - _, _, err = manager.GetUpstreams() - } - - Expect(upstreams).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should panic when updating HTTP upstream servers", func() { - updateServers := func() { - err = manager.UpdateHTTPServers("test", upstreamServers) - } - - Expect(updateServers).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should panic when updating stream upstream servers", func() { - updateServers := func() { - err = manager.UpdateStreamServers("test", streamUpstreamServers) - } - - Expect(updateServers).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) -}) - -func TestFindMainProcess(t *testing.T) { - t.Parallel() - readFileFuncGen := func(content []byte) runtime.ReadFileFunc { - return func(name string) ([]byte, error) { - if name != runtime.PidFile { - return nil, errors.New("error") - } - return content, nil - } - } - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - checkFileFuncGen := func(content fs.FileInfo) runtime.CheckFileFunc { - return func(name string) (fs.FileInfo, error) { - if name != runtime.PidFile { - return nil, errors.New("error") - } - return content, nil - } - } - checkFileError := func(string) (fs.FileInfo, error) { - return nil, errors.New("error") - } - var testFileInfo fs.FileInfo - ctx := 
context.Background() - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel) - - tests := []struct { - ctx context.Context - readFile runtime.ReadFileFunc - checkFile runtime.CheckFileFunc - name string - expected int - expectError bool - }{ - { - ctx: ctx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 1, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "empty file content", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("not a number")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "bad file content", - }, - { - ctx: ctx, - readFile: readFileError, - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "cannot read file", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: checkFileError, - expected: 0, - expectError: true, - name: "cannot find pid file", - }, - { - ctx: cancellingCtx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: checkFileError, - expected: 0, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - p := runtime.NewProcessHandlerImpl( - test.readFile, - test.checkFile) - result, err := p.FindMainProcess(test.ctx, 2*time.Millisecond) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(test.expected)) - } - }) - } -} diff --git a/internal/mode/static/nginx/runtime/runtime_suite_test.go b/internal/mode/static/nginx/runtime/runtime_suite_test.go deleted file mode 100644 index 8916c4bf14..0000000000 --- a/internal/mode/static/nginx/runtime/runtime_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package 
runtime_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestRuntime(t *testing.T) { - t.Parallel() - RegisterFailHandler(Fail) - RunSpecs(t, "Runtime Suite") -} diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go deleted file mode 100644 index ef8bd1668f..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go +++ /dev/null @@ -1,417 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginxinc/nginx-plus-go-client/client" -) - -type FakeManager struct { - GetUpstreamsStub func() (client.Upstreams, client.StreamUpstreams, error) - getUpstreamsMutex sync.RWMutex - getUpstreamsArgsForCall []struct { - } - getUpstreamsReturns struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - } - getUpstreamsReturnsOnCall map[int]struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - } - IsPlusStub func() bool - isPlusMutex sync.RWMutex - isPlusArgsForCall []struct { - } - isPlusReturns struct { - result1 bool - } - isPlusReturnsOnCall map[int]struct { - result1 bool - } - ReloadStub func(context.Context, int) error - reloadMutex sync.RWMutex - reloadArgsForCall []struct { - arg1 context.Context - arg2 int - } - reloadReturns struct { - result1 error - } - reloadReturnsOnCall map[int]struct { - result1 error - } - UpdateHTTPServersStub func(string, []client.UpstreamServer) error - updateHTTPServersMutex sync.RWMutex - updateHTTPServersArgsForCall []struct { - arg1 string - arg2 []client.UpstreamServer - } - updateHTTPServersReturns struct { - result1 error - } - updateHTTPServersReturnsOnCall map[int]struct { - result1 error - } - UpdateStreamServersStub func(string, []client.StreamUpstreamServer) 
error - updateStreamServersMutex sync.RWMutex - updateStreamServersArgsForCall []struct { - arg1 string - arg2 []client.StreamUpstreamServer - } - updateStreamServersReturns struct { - result1 error - } - updateStreamServersReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) GetUpstreams() (client.Upstreams, client.StreamUpstreams, error) { - fake.getUpstreamsMutex.Lock() - ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] - fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetUpstreamsStub - fakeReturns := fake.getUpstreamsReturns - fake.recordInvocation("GetUpstreams", []interface{}{}) - fake.getUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2, ret.result3 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 -} - -func (fake *FakeManager) GetUpstreamsCallCount() int { - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - return len(fake.getUpstreamsArgsForCall) -} - -func (fake *FakeManager) GetUpstreamsCalls(stub func() (client.Upstreams, client.StreamUpstreams, error)) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = stub -} - -func (fake *FakeManager) GetUpstreamsReturns(result1 client.Upstreams, result2 client.StreamUpstreams, result3 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - fake.getUpstreamsReturns = struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }{result1, result2, result3} -} - -func (fake *FakeManager) GetUpstreamsReturnsOnCall(i int, result1 client.Upstreams, result2 client.StreamUpstreams, result3 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - 
fake.GetUpstreamsStub = nil - if fake.getUpstreamsReturnsOnCall == nil { - fake.getUpstreamsReturnsOnCall = make(map[int]struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }) - } - fake.getUpstreamsReturnsOnCall[i] = struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }{result1, result2, result3} -} - -func (fake *FakeManager) IsPlus() bool { - fake.isPlusMutex.Lock() - ret, specificReturn := fake.isPlusReturnsOnCall[len(fake.isPlusArgsForCall)] - fake.isPlusArgsForCall = append(fake.isPlusArgsForCall, struct { - }{}) - stub := fake.IsPlusStub - fakeReturns := fake.isPlusReturns - fake.recordInvocation("IsPlus", []interface{}{}) - fake.isPlusMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) IsPlusCallCount() int { - fake.isPlusMutex.RLock() - defer fake.isPlusMutex.RUnlock() - return len(fake.isPlusArgsForCall) -} - -func (fake *FakeManager) IsPlusCalls(stub func() bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = stub -} - -func (fake *FakeManager) IsPlusReturns(result1 bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = nil - fake.isPlusReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeManager) IsPlusReturnsOnCall(i int, result1 bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = nil - if fake.isPlusReturnsOnCall == nil { - fake.isPlusReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isPlusReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeManager) Reload(arg1 context.Context, arg2 int) error { - fake.reloadMutex.Lock() - ret, specificReturn := fake.reloadReturnsOnCall[len(fake.reloadArgsForCall)] - fake.reloadArgsForCall = append(fake.reloadArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) 
- stub := fake.ReloadStub - fakeReturns := fake.reloadReturns - fake.recordInvocation("Reload", []interface{}{arg1, arg2}) - fake.reloadMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) ReloadCallCount() int { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - return len(fake.reloadArgsForCall) -} - -func (fake *FakeManager) ReloadCalls(stub func(context.Context, int) error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = stub -} - -func (fake *FakeManager) ReloadArgsForCall(i int) (context.Context, int) { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - argsForCall := fake.reloadArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) ReloadReturns(result1 error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = nil - fake.reloadReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) ReloadReturnsOnCall(i int, result1 error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = nil - if fake.reloadReturnsOnCall == nil { - fake.reloadReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.reloadReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateHTTPServers(arg1 string, arg2 []client.UpstreamServer) error { - var arg2Copy []client.UpstreamServer - if arg2 != nil { - arg2Copy = make([]client.UpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateHTTPServersMutex.Lock() - ret, specificReturn := fake.updateHTTPServersReturnsOnCall[len(fake.updateHTTPServersArgsForCall)] - fake.updateHTTPServersArgsForCall = append(fake.updateHTTPServersArgsForCall, struct { - arg1 string - arg2 []client.UpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateHTTPServersStub - fakeReturns := fake.updateHTTPServersReturns - 
fake.recordInvocation("UpdateHTTPServers", []interface{}{arg1, arg2Copy}) - fake.updateHTTPServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) UpdateHTTPServersCallCount() int { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - return len(fake.updateHTTPServersArgsForCall) -} - -func (fake *FakeManager) UpdateHTTPServersCalls(stub func(string, []client.UpstreamServer) error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = stub -} - -func (fake *FakeManager) UpdateHTTPServersArgsForCall(i int) (string, []client.UpstreamServer) { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - argsForCall := fake.updateHTTPServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) UpdateHTTPServersReturns(result1 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - fake.updateHTTPServersReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateHTTPServersReturnsOnCall(i int, result1 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - if fake.updateHTTPServersReturnsOnCall == nil { - fake.updateHTTPServersReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.updateHTTPServersReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateStreamServers(arg1 string, arg2 []client.StreamUpstreamServer) error { - var arg2Copy []client.StreamUpstreamServer - if arg2 != nil { - arg2Copy = make([]client.StreamUpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateStreamServersMutex.Lock() - ret, specificReturn := 
fake.updateStreamServersReturnsOnCall[len(fake.updateStreamServersArgsForCall)] - fake.updateStreamServersArgsForCall = append(fake.updateStreamServersArgsForCall, struct { - arg1 string - arg2 []client.StreamUpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateStreamServersStub - fakeReturns := fake.updateStreamServersReturns - fake.recordInvocation("UpdateStreamServers", []interface{}{arg1, arg2Copy}) - fake.updateStreamServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) UpdateStreamServersCallCount() int { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - return len(fake.updateStreamServersArgsForCall) -} - -func (fake *FakeManager) UpdateStreamServersCalls(stub func(string, []client.StreamUpstreamServer) error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = stub -} - -func (fake *FakeManager) UpdateStreamServersArgsForCall(i int) (string, []client.StreamUpstreamServer) { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - argsForCall := fake.updateStreamServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) UpdateStreamServersReturns(result1 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - fake.updateStreamServersReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateStreamServersReturnsOnCall(i int, result1 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - if fake.updateStreamServersReturnsOnCall == nil { - fake.updateStreamServersReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.updateStreamServersReturnsOnCall[i] = struct { - 
result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - fake.isPlusMutex.RLock() - defer fake.isPlusMutex.RUnlock() - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.Manager = new(FakeManager) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go deleted file mode 100644 index 10215a6758..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package runtimefakes - -import ( - "sync" - "time" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeMetricsCollector struct { - IncReloadCountStub func() - incReloadCountMutex sync.RWMutex - incReloadCountArgsForCall []struct { - } - IncReloadErrorsStub func() - incReloadErrorsMutex sync.RWMutex - incReloadErrorsArgsForCall []struct { - } - ObserveLastReloadTimeStub func(time.Duration) - observeLastReloadTimeMutex sync.RWMutex - observeLastReloadTimeArgsForCall []struct { - arg1 time.Duration - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeMetricsCollector) IncReloadCount() { - fake.incReloadCountMutex.Lock() - fake.incReloadCountArgsForCall = append(fake.incReloadCountArgsForCall, struct { - }{}) - stub := fake.IncReloadCountStub - fake.recordInvocation("IncReloadCount", []interface{}{}) - fake.incReloadCountMutex.Unlock() - if stub != nil { - fake.IncReloadCountStub() - } -} - -func (fake *FakeMetricsCollector) IncReloadCountCallCount() int { - fake.incReloadCountMutex.RLock() - defer fake.incReloadCountMutex.RUnlock() - return len(fake.incReloadCountArgsForCall) -} - -func (fake *FakeMetricsCollector) IncReloadCountCalls(stub func()) { - fake.incReloadCountMutex.Lock() - defer fake.incReloadCountMutex.Unlock() - fake.IncReloadCountStub = stub -} - -func (fake *FakeMetricsCollector) IncReloadErrors() { - fake.incReloadErrorsMutex.Lock() - fake.incReloadErrorsArgsForCall = append(fake.incReloadErrorsArgsForCall, struct { - }{}) - stub := fake.IncReloadErrorsStub - fake.recordInvocation("IncReloadErrors", []interface{}{}) - fake.incReloadErrorsMutex.Unlock() - if stub != nil { - fake.IncReloadErrorsStub() - } -} - -func (fake *FakeMetricsCollector) IncReloadErrorsCallCount() int { - fake.incReloadErrorsMutex.RLock() - defer fake.incReloadErrorsMutex.RUnlock() - return len(fake.incReloadErrorsArgsForCall) -} - -func (fake *FakeMetricsCollector) IncReloadErrorsCalls(stub 
func()) { - fake.incReloadErrorsMutex.Lock() - defer fake.incReloadErrorsMutex.Unlock() - fake.IncReloadErrorsStub = stub -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTime(arg1 time.Duration) { - fake.observeLastReloadTimeMutex.Lock() - fake.observeLastReloadTimeArgsForCall = append(fake.observeLastReloadTimeArgsForCall, struct { - arg1 time.Duration - }{arg1}) - stub := fake.ObserveLastReloadTimeStub - fake.recordInvocation("ObserveLastReloadTime", []interface{}{arg1}) - fake.observeLastReloadTimeMutex.Unlock() - if stub != nil { - fake.ObserveLastReloadTimeStub(arg1) - } -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeCallCount() int { - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - return len(fake.observeLastReloadTimeArgsForCall) -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeCalls(stub func(time.Duration)) { - fake.observeLastReloadTimeMutex.Lock() - defer fake.observeLastReloadTimeMutex.Unlock() - fake.ObserveLastReloadTimeStub = stub -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeArgsForCall(i int) time.Duration { - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - argsForCall := fake.observeLastReloadTimeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeMetricsCollector) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.incReloadCountMutex.RLock() - defer fake.incReloadCountMutex.RUnlock() - fake.incReloadErrorsMutex.RLock() - defer fake.incReloadErrorsMutex.RUnlock() - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeMetricsCollector) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() 
- defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.MetricsCollector = new(FakeMetricsCollector) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go deleted file mode 100644 index 3b1e522c72..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeNginxConfigVerifier struct { - EnsureConfigVersionStub func(context.Context, int) error - ensureConfigVersionMutex sync.RWMutex - ensureConfigVersionArgsForCall []struct { - arg1 context.Context - arg2 int - } - ensureConfigVersionReturns struct { - result1 error - } - ensureConfigVersionReturnsOnCall map[int]struct { - result1 error - } - GetConfigVersionStub func() (int, error) - getConfigVersionMutex sync.RWMutex - getConfigVersionArgsForCall []struct { - } - getConfigVersionReturns struct { - result1 int - result2 error - } - getConfigVersionReturnsOnCall map[int]struct { - result1 int - result2 error - } - WaitForCorrectVersionStub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error - waitForCorrectVersionMutex sync.RWMutex - waitForCorrectVersionArgsForCall []struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - } - waitForCorrectVersionReturns struct { - result1 error - } - waitForCorrectVersionReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake 
*FakeNginxConfigVerifier) EnsureConfigVersion(arg1 context.Context, arg2 int) error { - fake.ensureConfigVersionMutex.Lock() - ret, specificReturn := fake.ensureConfigVersionReturnsOnCall[len(fake.ensureConfigVersionArgsForCall)] - fake.ensureConfigVersionArgsForCall = append(fake.ensureConfigVersionArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) - stub := fake.EnsureConfigVersionStub - fakeReturns := fake.ensureConfigVersionReturns - fake.recordInvocation("EnsureConfigVersion", []interface{}{arg1, arg2}) - fake.ensureConfigVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionCallCount() int { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - return len(fake.ensureConfigVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionCalls(stub func(context.Context, int) error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionArgsForCall(i int) (context.Context, int) { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - argsForCall := fake.ensureConfigVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionReturns(result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - fake.ensureConfigVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionReturnsOnCall(i int, result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - if fake.ensureConfigVersionReturnsOnCall == 
nil { - fake.ensureConfigVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.ensureConfigVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersion() (int, error) { - fake.getConfigVersionMutex.Lock() - ret, specificReturn := fake.getConfigVersionReturnsOnCall[len(fake.getConfigVersionArgsForCall)] - fake.getConfigVersionArgsForCall = append(fake.getConfigVersionArgsForCall, struct { - }{}) - stub := fake.GetConfigVersionStub - fakeReturns := fake.getConfigVersionReturns - fake.recordInvocation("GetConfigVersion", []interface{}{}) - fake.getConfigVersionMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionCallCount() int { - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - return len(fake.getConfigVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionCalls(stub func() (int, error)) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionReturns(result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - fake.getConfigVersionReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionReturnsOnCall(i int, result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - if fake.getConfigVersionReturnsOnCall == nil { - fake.getConfigVersionReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.getConfigVersionReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, 
result2} -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersion(arg1 context.Context, arg2 int, arg3 string, arg4 []byte, arg5 runtime.ReadFileFunc) error { - var arg4Copy []byte - if arg4 != nil { - arg4Copy = make([]byte, len(arg4)) - copy(arg4Copy, arg4) - } - fake.waitForCorrectVersionMutex.Lock() - ret, specificReturn := fake.waitForCorrectVersionReturnsOnCall[len(fake.waitForCorrectVersionArgsForCall)] - fake.waitForCorrectVersionArgsForCall = append(fake.waitForCorrectVersionArgsForCall, struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - }{arg1, arg2, arg3, arg4Copy, arg5}) - stub := fake.WaitForCorrectVersionStub - fakeReturns := fake.waitForCorrectVersionReturns - fake.recordInvocation("WaitForCorrectVersion", []interface{}{arg1, arg2, arg3, arg4Copy, arg5}) - fake.waitForCorrectVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3, arg4, arg5) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionCallCount() int { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - return len(fake.waitForCorrectVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionCalls(stub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionArgsForCall(i int) (context.Context, int, string, []byte, runtime.ReadFileFunc) { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - argsForCall := fake.waitForCorrectVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 -} - -func (fake *FakeNginxConfigVerifier) 
WaitForCorrectVersionReturns(result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - fake.waitForCorrectVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionReturnsOnCall(i int, result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - if fake.waitForCorrectVersionReturnsOnCall == nil { - fake.waitForCorrectVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.waitForCorrectVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeNginxConfigVerifier) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go deleted file mode 100644 index 0a5065e74b..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go +++ 
/dev/null @@ -1,370 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginxinc/nginx-plus-go-client/client" -) - -type FakeNginxPlusClient struct { - GetStreamUpstreamsStub func() (*client.StreamUpstreams, error) - getStreamUpstreamsMutex sync.RWMutex - getStreamUpstreamsArgsForCall []struct { - } - getStreamUpstreamsReturns struct { - result1 *client.StreamUpstreams - result2 error - } - getStreamUpstreamsReturnsOnCall map[int]struct { - result1 *client.StreamUpstreams - result2 error - } - GetUpstreamsStub func() (*client.Upstreams, error) - getUpstreamsMutex sync.RWMutex - getUpstreamsArgsForCall []struct { - } - getUpstreamsReturns struct { - result1 *client.Upstreams - result2 error - } - getUpstreamsReturnsOnCall map[int]struct { - result1 *client.Upstreams - result2 error - } - UpdateHTTPServersStub func(string, []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) - updateHTTPServersMutex sync.RWMutex - updateHTTPServersArgsForCall []struct { - arg1 string - arg2 []client.UpstreamServer - } - updateHTTPServersReturns struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - } - updateHTTPServersReturnsOnCall map[int]struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - } - UpdateStreamServersStub func(string, []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) - updateStreamServersMutex sync.RWMutex - updateStreamServersArgsForCall []struct { - arg1 string - arg2 []client.StreamUpstreamServer - } - updateStreamServersReturns struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 
[]client.StreamUpstreamServer - result4 error - } - updateStreamServersReturnsOnCall map[int]struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreams() (*client.StreamUpstreams, error) { - fake.getStreamUpstreamsMutex.Lock() - ret, specificReturn := fake.getStreamUpstreamsReturnsOnCall[len(fake.getStreamUpstreamsArgsForCall)] - fake.getStreamUpstreamsArgsForCall = append(fake.getStreamUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetStreamUpstreamsStub - fakeReturns := fake.getStreamUpstreamsReturns - fake.recordInvocation("GetStreamUpstreams", []interface{}{}) - fake.getStreamUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsCallCount() int { - fake.getStreamUpstreamsMutex.RLock() - defer fake.getStreamUpstreamsMutex.RUnlock() - return len(fake.getStreamUpstreamsArgsForCall) -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsCalls(stub func() (*client.StreamUpstreams, error)) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - fake.GetStreamUpstreamsStub = stub -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsReturns(result1 *client.StreamUpstreams, result2 error) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - fake.GetStreamUpstreamsStub = nil - fake.getStreamUpstreamsReturns = struct { - result1 *client.StreamUpstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsReturnsOnCall(i int, result1 *client.StreamUpstreams, result2 error) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - 
fake.GetStreamUpstreamsStub = nil - if fake.getStreamUpstreamsReturnsOnCall == nil { - fake.getStreamUpstreamsReturnsOnCall = make(map[int]struct { - result1 *client.StreamUpstreams - result2 error - }) - } - fake.getStreamUpstreamsReturnsOnCall[i] = struct { - result1 *client.StreamUpstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetUpstreams() (*client.Upstreams, error) { - fake.getUpstreamsMutex.Lock() - ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] - fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetUpstreamsStub - fakeReturns := fake.getUpstreamsReturns - fake.recordInvocation("GetUpstreams", []interface{}{}) - fake.getUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxPlusClient) GetUpstreamsCallCount() int { - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - return len(fake.getUpstreamsArgsForCall) -} - -func (fake *FakeNginxPlusClient) GetUpstreamsCalls(stub func() (*client.Upstreams, error)) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = stub -} - -func (fake *FakeNginxPlusClient) GetUpstreamsReturns(result1 *client.Upstreams, result2 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - fake.getUpstreamsReturns = struct { - result1 *client.Upstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetUpstreamsReturnsOnCall(i int, result1 *client.Upstreams, result2 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - if fake.getUpstreamsReturnsOnCall == nil { - fake.getUpstreamsReturnsOnCall = make(map[int]struct { - result1 *client.Upstreams - result2 error 
- }) - } - fake.getUpstreamsReturnsOnCall[i] = struct { - result1 *client.Upstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServers(arg1 string, arg2 []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) { - var arg2Copy []client.UpstreamServer - if arg2 != nil { - arg2Copy = make([]client.UpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateHTTPServersMutex.Lock() - ret, specificReturn := fake.updateHTTPServersReturnsOnCall[len(fake.updateHTTPServersArgsForCall)] - fake.updateHTTPServersArgsForCall = append(fake.updateHTTPServersArgsForCall, struct { - arg1 string - arg2 []client.UpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateHTTPServersStub - fakeReturns := fake.updateHTTPServersReturns - fake.recordInvocation("UpdateHTTPServers", []interface{}{arg1, arg2Copy}) - fake.updateHTTPServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2, ret.result3, ret.result4 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersCallCount() int { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - return len(fake.updateHTTPServersArgsForCall) -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersCalls(stub func(string, []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error)) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = stub -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersArgsForCall(i int) (string, []client.UpstreamServer) { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - argsForCall := fake.updateHTTPServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake 
*FakeNginxPlusClient) UpdateHTTPServersReturns(result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - fake.updateHTTPServersReturns = struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - if fake.updateHTTPServersReturnsOnCall == nil { - fake.updateHTTPServersReturnsOnCall = make(map[int]struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }) - } - fake.updateHTTPServersReturnsOnCall[i] = struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateStreamServers(arg1 string, arg2 []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) { - var arg2Copy []client.StreamUpstreamServer - if arg2 != nil { - arg2Copy = make([]client.StreamUpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateStreamServersMutex.Lock() - ret, specificReturn := fake.updateStreamServersReturnsOnCall[len(fake.updateStreamServersArgsForCall)] - fake.updateStreamServersArgsForCall = append(fake.updateStreamServersArgsForCall, struct { - arg1 string - arg2 []client.StreamUpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateStreamServersStub - fakeReturns := fake.updateStreamServersReturns 
- fake.recordInvocation("UpdateStreamServers", []interface{}{arg1, arg2Copy}) - fake.updateStreamServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2, ret.result3, ret.result4 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersCallCount() int { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - return len(fake.updateStreamServersArgsForCall) -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersCalls(stub func(string, []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error)) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = stub -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersArgsForCall(i int) (string, []client.StreamUpstreamServer) { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - argsForCall := fake.updateStreamServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersReturns(result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - fake.updateStreamServersReturns = struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersReturnsOnCall(i int, result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { - 
fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - if fake.updateStreamServersReturnsOnCall == nil { - fake.updateStreamServersReturnsOnCall = make(map[int]struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }) - } - fake.updateStreamServersReturnsOnCall[i] = struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getStreamUpstreamsMutex.RLock() - defer fake.getStreamUpstreamsMutex.RUnlock() - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeNginxPlusClient) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.NginxPlusClient = new(FakeNginxPlusClient) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go deleted file mode 100644 index 481a354f25..0000000000 --- 
a/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - "time" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeProcessHandler struct { - FindMainProcessStub func(context.Context, time.Duration) (int, error) - findMainProcessMutex sync.RWMutex - findMainProcessArgsForCall []struct { - arg1 context.Context - arg2 time.Duration - } - findMainProcessReturns struct { - result1 int - result2 error - } - findMainProcessReturnsOnCall map[int]struct { - result1 int - result2 error - } - KillStub func(int) error - killMutex sync.RWMutex - killArgsForCall []struct { - arg1 int - } - killReturns struct { - result1 error - } - killReturnsOnCall map[int]struct { - result1 error - } - ReadFileStub func(string) ([]byte, error) - readFileMutex sync.RWMutex - readFileArgsForCall []struct { - arg1 string - } - readFileReturns struct { - result1 []byte - result2 error - } - readFileReturnsOnCall map[int]struct { - result1 []byte - result2 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeProcessHandler) FindMainProcess(arg1 context.Context, arg2 time.Duration) (int, error) { - fake.findMainProcessMutex.Lock() - ret, specificReturn := fake.findMainProcessReturnsOnCall[len(fake.findMainProcessArgsForCall)] - fake.findMainProcessArgsForCall = append(fake.findMainProcessArgsForCall, struct { - arg1 context.Context - arg2 time.Duration - }{arg1, arg2}) - stub := fake.FindMainProcessStub - fakeReturns := fake.findMainProcessReturns - fake.recordInvocation("FindMainProcess", []interface{}{arg1, arg2}) - fake.findMainProcessMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeProcessHandler) 
FindMainProcessCallCount() int { - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - return len(fake.findMainProcessArgsForCall) -} - -func (fake *FakeProcessHandler) FindMainProcessCalls(stub func(context.Context, time.Duration) (int, error)) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = stub -} - -func (fake *FakeProcessHandler) FindMainProcessArgsForCall(i int) (context.Context, time.Duration) { - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - argsForCall := fake.findMainProcessArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeProcessHandler) FindMainProcessReturns(result1 int, result2 error) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = nil - fake.findMainProcessReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) FindMainProcessReturnsOnCall(i int, result1 int, result2 error) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = nil - if fake.findMainProcessReturnsOnCall == nil { - fake.findMainProcessReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.findMainProcessReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) Kill(arg1 int) error { - fake.killMutex.Lock() - ret, specificReturn := fake.killReturnsOnCall[len(fake.killArgsForCall)] - fake.killArgsForCall = append(fake.killArgsForCall, struct { - arg1 int - }{arg1}) - stub := fake.KillStub - fakeReturns := fake.killReturns - fake.recordInvocation("Kill", []interface{}{arg1}) - fake.killMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeProcessHandler) KillCallCount() int { - 
fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - return len(fake.killArgsForCall) -} - -func (fake *FakeProcessHandler) KillCalls(stub func(int) error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = stub -} - -func (fake *FakeProcessHandler) KillArgsForCall(i int) int { - fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - argsForCall := fake.killArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeProcessHandler) KillReturns(result1 error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = nil - fake.killReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeProcessHandler) KillReturnsOnCall(i int, result1 error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = nil - if fake.killReturnsOnCall == nil { - fake.killReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.killReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeProcessHandler) ReadFile(arg1 string) ([]byte, error) { - fake.readFileMutex.Lock() - ret, specificReturn := fake.readFileReturnsOnCall[len(fake.readFileArgsForCall)] - fake.readFileArgsForCall = append(fake.readFileArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadFileStub - fakeReturns := fake.readFileReturns - fake.recordInvocation("ReadFile", []interface{}{arg1}) - fake.readFileMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeProcessHandler) ReadFileCallCount() int { - fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - return len(fake.readFileArgsForCall) -} - -func (fake *FakeProcessHandler) ReadFileCalls(stub func(string) ([]byte, error)) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = stub -} - -func (fake *FakeProcessHandler) ReadFileArgsForCall(i int) string { - 
fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - argsForCall := fake.readFileArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeProcessHandler) ReadFileReturns(result1 []byte, result2 error) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = nil - fake.readFileReturns = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) ReadFileReturnsOnCall(i int, result1 []byte, result2 error) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = nil - if fake.readFileReturnsOnCall == nil { - fake.readFileReturnsOnCall = make(map[int]struct { - result1 []byte - result2 error - }) - } - fake.readFileReturnsOnCall[i] = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeProcessHandler) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.ProcessHandler = new(FakeProcessHandler) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go deleted file mode 100644 index 
8c6aa7c426..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeVerifyClient struct { - EnsureConfigVersionStub func(context.Context, int) error - ensureConfigVersionMutex sync.RWMutex - ensureConfigVersionArgsForCall []struct { - arg1 context.Context - arg2 int - } - ensureConfigVersionReturns struct { - result1 error - } - ensureConfigVersionReturnsOnCall map[int]struct { - result1 error - } - GetConfigVersionStub func() (int, error) - getConfigVersionMutex sync.RWMutex - getConfigVersionArgsForCall []struct { - } - getConfigVersionReturns struct { - result1 int - result2 error - } - getConfigVersionReturnsOnCall map[int]struct { - result1 int - result2 error - } - WaitForCorrectVersionStub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error - waitForCorrectVersionMutex sync.RWMutex - waitForCorrectVersionArgsForCall []struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - } - waitForCorrectVersionReturns struct { - result1 error - } - waitForCorrectVersionReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeVerifyClient) EnsureConfigVersion(arg1 context.Context, arg2 int) error { - fake.ensureConfigVersionMutex.Lock() - ret, specificReturn := fake.ensureConfigVersionReturnsOnCall[len(fake.ensureConfigVersionArgsForCall)] - fake.ensureConfigVersionArgsForCall = append(fake.ensureConfigVersionArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) - stub := fake.EnsureConfigVersionStub - fakeReturns := fake.ensureConfigVersionReturns - fake.recordInvocation("EnsureConfigVersion", []interface{}{arg1, arg2}) - 
fake.ensureConfigVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeVerifyClient) EnsureConfigVersionCallCount() int { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - return len(fake.ensureConfigVersionArgsForCall) -} - -func (fake *FakeVerifyClient) EnsureConfigVersionCalls(stub func(context.Context, int) error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = stub -} - -func (fake *FakeVerifyClient) EnsureConfigVersionArgsForCall(i int) (context.Context, int) { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - argsForCall := fake.ensureConfigVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeVerifyClient) EnsureConfigVersionReturns(result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - fake.ensureConfigVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) EnsureConfigVersionReturnsOnCall(i int, result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - if fake.ensureConfigVersionReturnsOnCall == nil { - fake.ensureConfigVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.ensureConfigVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) GetConfigVersion() (int, error) { - fake.getConfigVersionMutex.Lock() - ret, specificReturn := fake.getConfigVersionReturnsOnCall[len(fake.getConfigVersionArgsForCall)] - fake.getConfigVersionArgsForCall = append(fake.getConfigVersionArgsForCall, struct { - }{}) - stub := fake.GetConfigVersionStub - fakeReturns := fake.getConfigVersionReturns - 
fake.recordInvocation("GetConfigVersion", []interface{}{}) - fake.getConfigVersionMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeVerifyClient) GetConfigVersionCallCount() int { - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - return len(fake.getConfigVersionArgsForCall) -} - -func (fake *FakeVerifyClient) GetConfigVersionCalls(stub func() (int, error)) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = stub -} - -func (fake *FakeVerifyClient) GetConfigVersionReturns(result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - fake.getConfigVersionReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeVerifyClient) GetConfigVersionReturnsOnCall(i int, result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - if fake.getConfigVersionReturnsOnCall == nil { - fake.getConfigVersionReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.getConfigVersionReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeVerifyClient) WaitForCorrectVersion(arg1 context.Context, arg2 int, arg3 string, arg4 []byte, arg5 runtime.ReadFileFunc) error { - var arg4Copy []byte - if arg4 != nil { - arg4Copy = make([]byte, len(arg4)) - copy(arg4Copy, arg4) - } - fake.waitForCorrectVersionMutex.Lock() - ret, specificReturn := fake.waitForCorrectVersionReturnsOnCall[len(fake.waitForCorrectVersionArgsForCall)] - fake.waitForCorrectVersionArgsForCall = append(fake.waitForCorrectVersionArgsForCall, struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 
runtime.ReadFileFunc - }{arg1, arg2, arg3, arg4Copy, arg5}) - stub := fake.WaitForCorrectVersionStub - fakeReturns := fake.waitForCorrectVersionReturns - fake.recordInvocation("WaitForCorrectVersion", []interface{}{arg1, arg2, arg3, arg4Copy, arg5}) - fake.waitForCorrectVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3, arg4, arg5) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionCallCount() int { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - return len(fake.waitForCorrectVersionArgsForCall) -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionCalls(stub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = stub -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionArgsForCall(i int) (context.Context, int, string, []byte, runtime.ReadFileFunc) { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - argsForCall := fake.waitForCorrectVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionReturns(result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - fake.waitForCorrectVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionReturnsOnCall(i int, result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - if fake.waitForCorrectVersionReturnsOnCall == nil { - fake.waitForCorrectVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - 
fake.waitForCorrectVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeVerifyClient) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} diff --git a/internal/mode/static/nginx/runtime/verify.go b/internal/mode/static/nginx/runtime/verify.go deleted file mode 100644 index e5d7e64b33..0000000000 --- a/internal/mode/static/nginx/runtime/verify.go +++ /dev/null @@ -1,155 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/util/wait" -) - -const configVersionURI = "/var/run/nginx/nginx-config-version.sock" - -var noNewWorkersErrFmt = "reload unsuccessful: no new NGINX worker processes started for config version %d." + - " Please check the NGINX container logs for possible configuration issues: %w" - -//go:generate go tool counterfeiter . 
nginxConfigVerifier - -type nginxConfigVerifier interface { - GetConfigVersion() (int, error) - WaitForCorrectVersion( - ctx context.Context, - expectedVersion int, - childProcFile string, - previousChildProcesses []byte, - readFile ReadFileFunc, - ) error - EnsureConfigVersion(ctx context.Context, expectedVersion int) error -} - -// VerifyClient is a client for verifying the config version. -type VerifyClient struct { - client *http.Client - timeout time.Duration -} - -// NewVerifyClient returns a new client pointed at the config version socket. -func NewVerifyClient(timeout time.Duration) *VerifyClient { - return &VerifyClient{ - client: &http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", configVersionURI) - }, - }, - }, - timeout: timeout, - } -} - -// GetConfigVersion gets the version number that we put in the nginx config to verify that we're using -// the correct config. -func (c *VerifyClient) GetConfigVersion() (int, error) { - ctx, cancel := context.WithTimeout(context.Background(), c.timeout) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://config-version/version", nil) - if err != nil { - return 0, fmt.Errorf("error creating request: %w", err) - } - - resp, err := c.client.Do(req) - if err != nil { - return 0, fmt.Errorf("error getting client: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return 0, fmt.Errorf("non-200 response: %v", resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return 0, fmt.Errorf("failed to read the response body: %w", err) - } - v, err := strconv.Atoi(string(body)) - if err != nil { - return 0, fmt.Errorf("error converting string to int: %w", err) - } - return v, nil -} - -// WaitForCorrectVersion first ensures any new worker processes have been started, and then calls the config version -// endpoint until it gets the expectedVersion, 
which ensures that a new worker process has been started for that config -// version. -func (c *VerifyClient) WaitForCorrectVersion( - ctx context.Context, - expectedVersion int, - childProcFile string, - previousChildProcesses []byte, - readFile ReadFileFunc, -) error { - ctx, cancel := context.WithTimeout(ctx, c.timeout) - defer cancel() - - if err := ensureNewNginxWorkers( - ctx, - childProcFile, - previousChildProcesses, - readFile, - ); err != nil { - return fmt.Errorf(noNewWorkersErrFmt, expectedVersion, err) - } - - if err := c.EnsureConfigVersion(ctx, expectedVersion); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - err = fmt.Errorf( - "config version check didn't return expected version %d within the deadline", - expectedVersion, - ) - } - return fmt.Errorf("could not get expected config version %d: %w", expectedVersion, err) - } - return nil -} - -func (c *VerifyClient) EnsureConfigVersion(ctx context.Context, expectedVersion int) error { - return wait.PollUntilContextCancel( - ctx, - 25*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - version, err := c.GetConfigVersion() - return version == expectedVersion, err - }, - ) -} - -func ensureNewNginxWorkers( - ctx context.Context, - childProcFile string, - previousContents []byte, - readFile ReadFileFunc, -) error { - return wait.PollUntilContextCancel( - ctx, - 25*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - content, err := readFile(childProcFile) - if err != nil { - return false, err - } - if !bytes.Equal(previousContents, content) { - return true, nil - } - return false, nil - }, - ) -} diff --git a/internal/mode/static/nginx/runtime/verify_test.go b/internal/mode/static/nginx/runtime/verify_test.go deleted file mode 100644 index d20844a410..0000000000 --- a/internal/mode/static/nginx/runtime/verify_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "errors" - 
"io" - "net/http" - "testing" - "time" - - . "github.com/onsi/gomega" -) - -type transport struct{} - -func (c transport) RoundTrip(_ *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(bytes.NewBufferString("42")), - Header: make(http.Header), - }, nil -} - -func getTestHTTPClient() *http.Client { - ts := transport{} - return &http.Client{ - Transport: ts, - } -} - -func TestVerifyClient(t *testing.T) { - t.Parallel() - c := VerifyClient{ - client: getTestHTTPClient(), - timeout: 25 * time.Millisecond, - } - - ctx := context.Background() - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel) - - newContents := []byte("4 5 6") - - readFileNew := func(string) ([]byte, error) { - return newContents, nil - } - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - tests := []struct { - ctx context.Context - readFile ReadFileFunc - name string - expectedVersion int - expectError bool - }{ - { - ctx: ctx, - expectedVersion: 42, - readFile: readFileNew, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - expectedVersion: 43, - readFile: readFileNew, - expectError: true, - name: "wrong version", - }, - { - ctx: ctx, - expectedVersion: 0, - readFile: readFileError, - expectError: true, - name: "no new workers", - }, - { - ctx: cancellingCtx, - expectedVersion: 0, - readFile: readFileNew, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := c.WaitForCorrectVersion(test.ctx, test.expectedVersion, "/childfile", []byte("1 2 3"), test.readFile) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} - -func TestEnsureNewNginxWorkers(t *testing.T) { - t.Parallel() - previousContents := []byte("1 2 3") - newContents := []byte("4 5 6") - 
- readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - readFilePrevious := func(string) ([]byte, error) { - return previousContents, nil - } - - readFileNew := func(string) ([]byte, error) { - return newContents, nil - } - - ctx := context.Background() - - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(100*time.Millisecond, cancel) - - cancellingCtx2, cancel2 := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel2) - - tests := []struct { - ctx context.Context - readFile ReadFileFunc - name string - previousContents []byte - expectError bool - }{ - { - ctx: ctx, - readFile: readFileNew, - previousContents: previousContents, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - readFile: readFileError, - previousContents: previousContents, - expectError: true, - name: "cannot read file", - }, - { - ctx: cancellingCtx, - readFile: readFilePrevious, - previousContents: previousContents, - expectError: true, - name: "timed out waiting for new workers", - }, - { - ctx: cancellingCtx2, - readFile: readFilePrevious, - previousContents: previousContents, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := ensureNewNginxWorkers( - test.ctx, - "/childfile", - test.previousContents, - test.readFile, - ) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} diff --git a/internal/mode/static/provisioner/doc.go b/internal/mode/static/provisioner/doc.go new file mode 100644 index 0000000000..14cffc569b --- /dev/null +++ b/internal/mode/static/provisioner/doc.go @@ -0,0 +1,4 @@ +/* +Package provisioner contains the functions for deploying an instance of nginx. 
+*/ +package provisioner diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go new file mode 100644 index 0000000000..5c5d4bea49 --- /dev/null +++ b/internal/mode/static/provisioner/eventloop.go @@ -0,0 +1,202 @@ +package provisioner + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + k8spredicate "sigs.k8s.io/controller-runtime/pkg/predicate" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/predicate" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" +) + +func newEventLoop( + ctx context.Context, + mgr manager.Manager, + handler *eventHandler, + logger logr.Logger, + selector metav1.LabelSelector, + ngfNamespace string, + dockerSecrets []string, + agentTLSSecret string, + usageConfig *config.UsageReportConfig, + isOpenshift bool, +) (*events.EventLoop, error) { + nginxResourceLabelPredicate := predicate.NginxLabelPredicate(selector) + + secretsToWatch := make([]string, 0, len(dockerSecrets)+4) + secretsToWatch = append(secretsToWatch, agentTLSSecret) + secretsToWatch = append(secretsToWatch, dockerSecrets...) 
+ + if usageConfig != nil { + if usageConfig.SecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.SecretName) + } + if usageConfig.CASecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.CASecretName) + } + if usageConfig.ClientSSLSecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.ClientSSLSecretName) + } + } + + type ctlrCfg struct { + objectType ngftypes.ObjectType + options []controller.Option + } + + controllerRegCfgs := []ctlrCfg{ + { + objectType: &gatewayv1.Gateway{}, + }, + { + objectType: &appsv1.Deployment{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + predicate.RestartDeploymentAnnotationPredicate{}, + ), + ), + }, + }, + { + objectType: &corev1.Service{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + nginxResourceLabelPredicate, + ), + ), + }, + }, + { + objectType: &corev1.ServiceAccount{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + { + objectType: &corev1.ConfigMap{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + { + objectType: &corev1.Secret{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.ResourceVersionChangedPredicate{}, + k8spredicate.Or( + nginxResourceLabelPredicate, + predicate.SecretNamePredicate{Namespace: ngfNamespace, SecretNames: secretsToWatch}, + ), + ), + ), + }, + }, + } + + if isOpenshift { + controllerRegCfgs = append(controllerRegCfgs, + ctlrCfg{ + objectType: &rbacv1.Role{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + 
k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + ctlrCfg{ + objectType: &rbacv1.RoleBinding{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + ) + } + + eventCh := make(chan any) + for _, regCfg := range controllerRegCfgs { + gvk, err := apiutil.GVKForObject(regCfg.objectType, mgr.GetScheme()) + if err != nil { + panic(fmt.Sprintf("could not extract GVK for object: %T", regCfg.objectType)) + } + + if err := controller.Register( + ctx, + regCfg.objectType, + fmt.Sprintf("provisioner-%s", gvk.Kind), + mgr, + eventCh, + regCfg.options..., + ); err != nil { + return nil, fmt.Errorf("cannot register controller for %T: %w", regCfg.objectType, err) + } + } + + objectList := []client.ObjectList{ + // GatewayList MUST be first in this list to ensure that we see it before attempting + // to provision or deprovision any nginx resources. 
+ &gatewayv1.GatewayList{}, + &appsv1.DeploymentList{}, + &corev1.ServiceList{}, + &corev1.ServiceAccountList{}, + &corev1.ConfigMapList{}, + &corev1.SecretList{}, + } + + if isOpenshift { + objectList = append(objectList, + &rbacv1.RoleList{}, + &rbacv1.RoleBindingList{}, + ) + } + + firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( + mgr.GetCache(), + []client.Object{}, + objectList, + ) + + eventLoop := events.NewEventLoop( + eventCh, + logger.WithName("eventLoop"), + handler, + firstBatchPreparer, + ) + + return eventLoop, nil +} diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go new file mode 100644 index 0000000000..ee7813fb96 --- /dev/null +++ b/internal/mode/static/provisioner/handler.go @@ -0,0 +1,312 @@ +package provisioner + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +// eventHandler ensures each Gateway for the specific GatewayClass has a corresponding Deployment +// of NGF configured to use that specific Gateway. +// +// eventHandler implements events.Handler interface. +type eventHandler struct { + store *store + provisioner *NginxProvisioner + labelSelector labels.Selector + // gcName is the GatewayClass name for this control plane. 
+ gcName string +} + +func newEventHandler( + store *store, + provisioner *NginxProvisioner, + selector metav1.LabelSelector, + gcName string, +) (*eventHandler, error) { + labelSelector, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + return nil, fmt.Errorf("error initializing label selector: %w", err) + } + + return &eventHandler{ + store: store, + provisioner: provisioner, + labelSelector: labelSelector, + gcName: gcName, + }, nil +} + +//nolint:gocyclo // will refactor at some point +func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, batch events.EventBatch) { + for _, event := range batch { + switch e := event.(type) { + case *events.UpsertEvent: + switch obj := e.Resource.(type) { + case *gatewayv1.Gateway: + h.store.updateGateway(obj) + case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap, *rbacv1.Role, *rbacv1.RoleBinding: + objLabels := labels.Set(obj.GetLabels()) + if h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + } + case *corev1.Service: + objLabels := labels.Set(obj.GetLabels()) + if h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + + statusUpdate := &status.QueueObject{ + Deployment: client.ObjectKeyFromObject(obj), + UpdateType: status.UpdateGateway, + GatewayService: obj, + } + h.provisioner.cfg.StatusQueue.Enqueue(statusUpdate) + } + case *corev1.Secret: + objLabels := labels.Set(obj.GetLabels()) + if 
h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + } else if h.provisioner.isUserSecret(obj.GetName()) { + if err := h.provisionResourceForAllGateways(ctx, logger, obj); err != nil { + logger.Error(err, "error updating resource") + } + } + default: + panic(fmt.Errorf("unknown resource type %T", e.Resource)) + } + case *events.DeleteEvent: + switch e.Type.(type) { + case *gatewayv1.Gateway: + if !h.provisioner.isLeader() { + h.provisioner.setResourceToDelete(e.NamespacedName) + } + + if err := h.provisioner.deprovisionNginx(ctx, e.NamespacedName); err != nil { + logger.Error(err, "error deprovisioning nginx resources") + } + h.store.deleteGateway(e.NamespacedName) + case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, + *corev1.ConfigMap, *rbacv1.Role, *rbacv1.RoleBinding: + if err := h.reprovisionResources(ctx, e); err != nil { + logger.Error(err, "error re-provisioning nginx resources") + } + case *corev1.Secret: + if h.provisioner.isUserSecret(e.NamespacedName.Name) { + if err := h.deprovisionSecretsForAllGateways(ctx, e.NamespacedName.Name); err != nil { + logger.Error(err, "error removing secrets") + } + } else { + if err := h.reprovisionResources(ctx, e); err != nil { + logger.Error(err, "error re-provisioning nginx resources") + } + } + default: + panic(fmt.Errorf("unknown resource type %T", e.Type)) + } + default: + panic(fmt.Errorf("unknown event type %T", e)) + } + } +} + +// updateOrDeleteResources ensures that nginx resources are either: +// - deleted if the Gateway no longer exists (this is for when the controller first starts up) +// - are updated to the proper state in case a user makes a change directly to the resource. 
+func (h *eventHandler) updateOrDeleteResources( + ctx context.Context, + logger logr.Logger, + obj client.Object, + gatewayNSName types.NamespacedName, +) error { + if gw := h.store.getGateway(gatewayNSName); gw == nil { + if !h.provisioner.isLeader() { + h.provisioner.setResourceToDelete(gatewayNSName) + + return nil + } + + if err := h.provisioner.deprovisionNginx(ctx, gatewayNSName); err != nil { + return fmt.Errorf("error deprovisioning nginx resources: %w", err) + } + return nil + } + + if h.store.getResourceVersionForObject(gatewayNSName, obj) == obj.GetResourceVersion() { + return nil + } + + h.store.registerResourceInGatewayConfig(gatewayNSName, obj) + if err := h.provisionResource(ctx, logger, gatewayNSName, obj); err != nil { + return fmt.Errorf("error updating nginx resource: %w", err) + } + + return nil +} + +func (h *eventHandler) provisionResource( + ctx context.Context, + logger logr.Logger, + gatewayNSName types.NamespacedName, + obj client.Object, +) error { + resources := h.store.getNginxResourcesForGateway(gatewayNSName) + if resources != nil && resources.Gateway != nil { + resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) + + objects, err := h.provisioner.buildNginxResourceObjects( + resourceName, + resources.Gateway.Source, + resources.Gateway.EffectiveNginxProxy, + ) + if err != nil { + logger.Error(err, "error building some nginx resources") + } + + // only provision the object that was updated + var objectToProvision client.Object + for _, object := range objects { + if strings.HasSuffix(object.GetName(), obj.GetName()) && reflect.TypeOf(object) == reflect.TypeOf(obj) { + objectToProvision = object + break + } + } + + if objectToProvision == nil { + return nil + } + + if err := h.provisioner.provisionNginx( + ctx, + resourceName, + resources.Gateway.Source, + []client.Object{objectToProvision}, + ); err != nil { + return fmt.Errorf("error updating nginx resource: %w", err) + } + } + + return nil +} + +// 
reprovisionResources redeploys nginx resources that have been deleted but should not have been. +func (h *eventHandler) reprovisionResources(ctx context.Context, event *events.DeleteEvent) error { + if gateway := h.store.gatewayExistsForResource(event.Type, event.NamespacedName); gateway != nil && gateway.Valid { + resourceName := controller.CreateNginxResourceName(gateway.Source.GetName(), h.gcName) + if err := h.provisioner.reprovisionNginx( + ctx, + resourceName, + gateway.Source, + gateway.EffectiveNginxProxy, + ); err != nil { + return err + } + } + return nil +} + +// provisionResourceForAllGateways is called when a resource is updated that needs to be applied +// to all Gateway deployments. For example, NGINX Plus secrets. +func (h *eventHandler) provisionResourceForAllGateways( + ctx context.Context, + logger logr.Logger, + obj client.Object, +) error { + var allErrs []error + gateways := h.store.getGateways() + for gateway := range gateways { + if err := h.provisionResource(ctx, logger, gateway, obj); err != nil { + allErrs = append(allErrs, err) + } + } + + return errors.Join(allErrs...) +} + +// deprovisionSecretsForAllGateways cleans up any secrets that a user deleted that were duplicated +// for all Gateways. For example, NGINX Plus secrets. 
+func (h *eventHandler) deprovisionSecretsForAllGateways(ctx context.Context, secret string) error { + var allErrs []error + + gateways := h.store.getGateways() + for gateway := range gateways { + resources := h.store.getNginxResourcesForGateway(gateway) + if resources == nil { + continue + } + + switch { + case strings.HasSuffix(resources.AgentTLSSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.AgentTLSSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + case strings.HasSuffix(resources.PlusJWTSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusJWTSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + case strings.HasSuffix(resources.PlusCASecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusCASecret), + ); err != nil { + allErrs = append(allErrs, err) + } + case strings.HasSuffix(resources.PlusClientSSLSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusClientSSLSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + default: + for _, dockerSecret := range resources.DockerSecrets { + if strings.HasSuffix(dockerSecret.Name, secret) { + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(dockerSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + } + } + } + } + + return errors.Join(allErrs...) +} diff --git a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go new file mode 100644 index 0000000000..1f44a27de0 --- /dev/null +++ b/internal/mode/static/provisioner/handler_test.go @@ -0,0 +1,370 @@ +package provisioner + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . 
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

	"github.com/nginx/nginx-gateway-fabric/internal/framework/controller"
	"github.com/nginx/nginx-gateway-fabric/internal/framework/events"
	"github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph"
	"github.com/nginx/nginx-gateway-fabric/internal/mode/static/status"
)

// TestHandleEventBatch_Upsert exercises upsert-event handling for Gateways, nginx
// Deployments/Services, provisioned (duplicated) Secrets, and user-owned Secrets,
// including the non-leader tracking behavior at the end.
func TestHandleEventBatch_Upsert(t *testing.T) {
	t.Parallel()
	g := NewWithT(t)

	store := newStore([]string{dockerTestSecretName}, "", jwtTestSecretName, "", "")
	provisioner, fakeClient, _ := defaultNginxProvisioner()
	provisioner.cfg.StatusQueue = status.NewQueue()

	labelSelector := metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "nginx"},
	}
	gcName := "nginx"

	handler, err := newEventHandler(store, provisioner, labelSelector, gcName)
	g.Expect(err).ToNot(HaveOccurred())

	ctx := context.TODO()
	logger := logr.Discard()

	gateway := &gatewayv1.Gateway{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "gw",
			Namespace: "default",
			Labels:    map[string]string{"app": "nginx"},
		},
	}

	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "gw-nginx",
			Namespace:       "default",
			ResourceVersion: "1",
			Labels:          map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
	}

	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "gw-nginx",
			Namespace:       "default",
			ResourceVersion: "1",
			Labels:          map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
	}

	// jwtSecret is the duplicated secret living next to the Gateway; userJwtSecret is
	// the original user-provided secret in the NGF namespace.
	jwtSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "gw-nginx-" + jwtTestSecretName,
			Namespace:       "default",
			ResourceVersion: "1",
			Labels:          map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
		Data: map[string][]byte{
			"data": []byte("oldData"),
		},
	}

	userJwtSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jwtTestSecretName,
			Namespace: ngfNamespace,
		},
		Data: map[string][]byte{
			"data": []byte("oldData"),
		},
	}
	g.Expect(fakeClient.Create(ctx, userJwtSecret)).To(Succeed())

	dockerSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "gw-nginx-" + dockerTestSecretName,
			Namespace:       "default",
			ResourceVersion: "1",
			Labels:          map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
		Data: map[string][]byte{
			"data": []byte("oldDockerData"),
		},
	}

	userDockerSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      dockerTestSecretName,
			Namespace: ngfNamespace,
		},
		Data: map[string][]byte{
			"data": []byte("oldDockerData"),
		},
	}
	g.Expect(fakeClient.Create(ctx, userDockerSecret)).To(Succeed())

	// Test handling Gateway
	upsertEvent := &events.UpsertEvent{Resource: gateway}
	batch := events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(Equal(gateway))

	store.registerResourceInGatewayConfig(
		client.ObjectKeyFromObject(gateway),
		&graph.Gateway{Source: gateway, Valid: true},
	)

	// Test handling Deployment
	upsertEvent = &events.UpsertEvent{Resource: deployment}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed())

	// Test handling Service; a Service upsert should also enqueue a status update.
	upsertEvent = &events.UpsertEvent{Resource: service}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(provisioner.cfg.StatusQueue.Dequeue(ctx)).ToNot(BeNil())
	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(service), &corev1.Service{})).To(Succeed())

	// Test handling provisioned Secret
	upsertEvent = &events.UpsertEvent{Resource: jwtSecret}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), &corev1.Secret{})).To(Succeed())

	// Test handling user Plus Secret: updating the user secret should propagate the new
	// data into the duplicated secret.
	secret := &corev1.Secret{}
	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), secret)).To(Succeed())
	g.Expect(secret.Data).To(HaveKey("data"))
	g.Expect(secret.Data["data"]).To(Equal([]byte("oldData")))

	userJwtSecret.Data["data"] = []byte("newData")
	g.Expect(fakeClient.Update(ctx, userJwtSecret)).To(Succeed())
	upsertEvent = &events.UpsertEvent{Resource: userJwtSecret}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), secret)).To(Succeed())
	g.Expect(secret.Data).To(HaveKey("data"))
	g.Expect(secret.Data["data"]).To(Equal([]byte("newData")))

	// Test handling user Docker Secret
	upsertEvent = &events.UpsertEvent{Resource: dockerSecret}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(dockerSecret), secret)).To(Succeed())
	g.Expect(secret.Data).To(HaveKey("data"))
	g.Expect(secret.Data["data"]).To(Equal([]byte("oldDockerData")))

	userDockerSecret.Data["data"] = []byte("newDockerData")
	g.Expect(fakeClient.Update(ctx, userDockerSecret)).To(Succeed())
	upsertEvent = &events.UpsertEvent{Resource: userDockerSecret}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(dockerSecret), secret)).To(Succeed())
	g.Expect(secret.Data).To(HaveKey("data"))
	g.Expect(secret.Data["data"]).To(Equal([]byte("newDockerData")))

	// remove Gateway from store and verify that Deployment UpsertEvent results in deletion of resource
	store.deleteGateway(client.ObjectKeyFromObject(gateway))
	g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil())

	upsertEvent = &events.UpsertEvent{Resource: deployment}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).ToNot(Succeed())

	// do the same thing but when provisioner is not leader.
	// non-leader should not delete resources, but instead track them
	deployment.ResourceVersion = ""
	g.Expect(fakeClient.Create(ctx, deployment)).To(Succeed())
	provisioner.leader = false

	upsertEvent = &events.UpsertEvent{Resource: deployment}
	batch = events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(provisioner.resourcesToDeleteOnStartup).To(HaveLen(1))
	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed())
}

// TestHandleEventBatch_Delete exercises delete-event handling: re-creating resources that
// should still exist, cleaning up duplicated secrets when the user's originals are deleted,
// and Gateway deletion in both non-leader (track only) and leader (delete) modes.
func TestHandleEventBatch_Delete(t *testing.T) {
	t.Parallel()
	g := NewWithT(t)

	store := newStore(
		[]string{dockerTestSecretName},
		agentTLSTestSecretName,
		jwtTestSecretName,
		caTestSecretName,
		clientTestSecretName,
	)
	provisioner, fakeClient, _ := defaultNginxProvisioner()
	provisioner.cfg.StatusQueue = status.NewQueue()

	labelSelector := metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "nginx"},
	}
	gcName := "nginx"

	handler, err := newEventHandler(store, provisioner, labelSelector, gcName)
	g.Expect(err).ToNot(HaveOccurred())

	ctx := context.TODO()
	logger := logr.Discard()

	// initialize resources
	gateway := &gatewayv1.Gateway{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "gw",
			Namespace: "default",
			Labels:    map[string]string{"app": "nginx"},
		},
	}

	store.registerResourceInGatewayConfig(
		client.ObjectKeyFromObject(gateway),
		&graph.Gateway{Source: gateway, Valid: true},
	)

	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "gw-nginx",
			Namespace: "default",
			Labels:    map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
	}

	originalAgentTLSSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      agentTLSTestSecretName,
			Namespace: ngfNamespace,
		},
	}
	g.Expect(fakeClient.Create(ctx, originalAgentTLSSecret)).To(Succeed())

	jwtSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "gw-nginx-" + jwtTestSecretName,
			Namespace: "default",
			Labels:    map[string]string{"app": "nginx", controller.GatewayLabel: "gw"},
		},
	}

	userJwtSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jwtTestSecretName,
			Namespace: ngfNamespace,
		},
	}
	g.Expect(fakeClient.Create(ctx, userJwtSecret)).To(Succeed())

	userCASecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      caTestSecretName,
			Namespace: ngfNamespace,
		},
	}
	g.Expect(fakeClient.Create(ctx, userCASecret)).To(Succeed())

	userClientSSLSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clientTestSecretName,
			Namespace: ngfNamespace,
		},
	}
	g.Expect(fakeClient.Create(ctx, userClientSSLSecret)).To(Succeed())

	userDockerSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      dockerTestSecretName,
			Namespace: ngfNamespace,
		},
	}
	g.Expect(fakeClient.Create(ctx, userDockerSecret)).To(Succeed())

	upsertEvent := &events.UpsertEvent{Resource: gateway}
	batch := events.EventBatch{upsertEvent}
	handler.HandleEventBatch(ctx, logger, batch)
	store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), deployment)

	// if deployment is deleted, it should be re-created since Gateway still exists
	deleteEvent := &events.DeleteEvent{Type: deployment, NamespacedName: client.ObjectKeyFromObject(deployment)}
	batch = events.EventBatch{deleteEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed())

	// if provisioned secret is deleted, it should be re-created
	deleteEvent = &events.DeleteEvent{Type: jwtSecret, NamespacedName: client.ObjectKeyFromObject(jwtSecret)}
	batch = events.EventBatch{deleteEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), &corev1.Secret{})).To(Succeed())

	// if user-provided secrets are deleted, then delete the duplicates of them
	verifySecret := func(name string, userSecret *corev1.Secret) {
		key := types.NamespacedName{
			Name:      "gw-nginx-" + name,
			Namespace: "default",
		}

		secret := &corev1.Secret{}
		g.Expect(fakeClient.Get(ctx, key, secret)).To(Succeed())
		store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), secret)

		g.Expect(fakeClient.Delete(ctx, userSecret)).To(Succeed())
		deleteEvent = &events.DeleteEvent{Type: userSecret, NamespacedName: client.ObjectKeyFromObject(userSecret)}
		batch = events.EventBatch{deleteEvent}
		handler.HandleEventBatch(ctx, logger, batch)

		g.Expect(fakeClient.Get(ctx, key, &corev1.Secret{})).ToNot(Succeed())
	}

	verifySecret(agentTLSTestSecretName, originalAgentTLSSecret)
	verifySecret(jwtTestSecretName, userJwtSecret)
	verifySecret(caTestSecretName, userCASecret)
	verifySecret(clientTestSecretName, userClientSSLSecret)
	verifySecret(dockerTestSecretName, userDockerSecret)

	// delete Gateway when provisioner is not leader
	provisioner.leader = false

	deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)}
	batch = events.EventBatch{deleteEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(provisioner.resourcesToDeleteOnStartup).To(Equal([]types.NamespacedName{
		{
			Namespace: "default",
			Name:      "gw",
		},
	}))
	g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil())
	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed())

	// delete Gateway when provisioner is leader
	provisioner.leader = true

	deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)}
	batch = events.EventBatch{deleteEvent}
	handler.HandleEventBatch(ctx, logger, batch)

	g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil())
	g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).ToNot(Succeed())
}

// ---- diff boundary: new file internal/mode/static/provisioner/objects.go ----

package provisioner

import (
	"context"
	"errors"
	"fmt"
	"maps"
	"sort"
	"strconv"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

	ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2"
	"github.com/nginx/nginx-gateway-fabric/internal/framework/controller"
	"github.com/nginx/nginx-gateway-fabric/internal/framework/helpers"
	"github.com/nginx/nginx-gateway-fabric/internal/mode/static/config"
	"github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph"
)

const (
	defaultNginxErrorLogLevel        = "info"
	nginxIncludesConfigMapNameSuffix = "includes-bootstrap"
	nginxAgentConfigMapNameSuffix    = "agent-config"

	defaultServiceType   = corev1.ServiceTypeLoadBalancer
	defaultServicePolicy = corev1.ServiceExternalTrafficPolicyLocal

	defaultNginxImagePath     = "ghcr.io/nginx/nginx-gateway-fabric/nginx"
	defaultNginxPlusImagePath = "private-registry.nginx.com/nginx-gateway-fabric/nginx-plus"
	defaultImagePullPolicy    = corev1.PullIfNotPresent
)

// emptyDirVolumeSource is shared by the many emptyDir volumes mounted into the nginx pod.
var emptyDirVolumeSource = corev1.VolumeSource{EmptyDir:
&corev1.EmptyDirVolumeSource{}}

// buildNginxResourceObjects builds every Kubernetes object needed to run nginx for the
// given Gateway: duplicated secrets, configmaps, a serviceaccount, OpenShift role objects
// when applicable, a service, and a deployment — returned in installation order.
func (p *NginxProvisioner) buildNginxResourceObjects(
	resourceName string,
	gateway *gatewayv1.Gateway,
	nProxyCfg *graph.EffectiveNginxProxy,
) ([]client.Object, error) {
	// Need to ensure nginx resource objects are generated deterministically. Specifically when generating
	// an object's field by ranging over a map, since ranging over a map is done in random order, we need to
	// do some processing to ensure the generated results are the same each time.

	ngxIncludesConfigMapName := controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix)
	ngxAgentConfigMapName := controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix)
	agentTLSSecretName := controller.CreateNginxResourceName(resourceName, p.cfg.AgentTLSSecretName)

	// Plus-only secrets; CA and client SSL secrets are optional even with Plus enabled.
	var jwtSecretName, caSecretName, clientSSLSecretName string
	if p.cfg.Plus {
		jwtSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.SecretName)
		if p.cfg.PlusUsageConfig.CASecretName != "" {
			caSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.CASecretName)
		}
		if p.cfg.PlusUsageConfig.ClientSSLSecretName != "" {
			clientSSLSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.ClientSSLSecretName)
		}
	}

	// map key is the new name, value is the original name
	dockerSecretNames := make(map[string]string)
	for _, name := range p.cfg.NginxDockerSecretNames {
		newName := controller.CreateNginxResourceName(resourceName, name)
		dockerSecretNames[newName] = name
	}

	selectorLabels := make(map[string]string)
	maps.Copy(selectorLabels, p.baseLabelSelector.MatchLabels)
	selectorLabels[controller.GatewayLabel] = gateway.GetName()
	selectorLabels[controller.AppNameLabel] = resourceName

	labels := make(map[string]string)
	annotations := make(map[string]string)

	maps.Copy(labels, selectorLabels)

	// Gateway infrastructure labels/annotations are propagated onto the nginx objects.
	if gateway.Spec.Infrastructure != nil {
		for key, value := range gateway.Spec.Infrastructure.Labels {
			labels[string(key)] = string(value)
		}

		for key, value := range gateway.Spec.Infrastructure.Annotations {
			annotations[string(key)] = string(value)
		}
	}

	objectMeta := metav1.ObjectMeta{
		Name:        resourceName,
		Namespace:   gateway.GetNamespace(),
		Labels:      labels,
		Annotations: annotations,
	}

	// err is carried to the end so the objects that did build are still returned.
	secrets, err := p.buildNginxSecrets(
		objectMeta,
		agentTLSSecretName,
		dockerSecretNames,
		jwtSecretName,
		caSecretName,
		clientSSLSecretName,
	)

	configmaps := p.buildNginxConfigMaps(
		objectMeta,
		nProxyCfg,
		ngxIncludesConfigMapName,
		ngxAgentConfigMapName,
		caSecretName != "",
		clientSSLSecretName != "",
	)

	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: objectMeta,
	}

	var openshiftObjs []client.Object
	if p.isOpenshift {
		openshiftObjs = p.buildOpenshiftObjects(objectMeta)
	}

	// Collect the set of listener ports; duplicates collapse via the map.
	ports := make(map[int32]struct{})
	for _, listener := range gateway.Spec.Listeners {
		ports[int32(listener.Port)] = struct{}{}
	}

	service := buildNginxService(objectMeta, nProxyCfg, ports, selectorLabels)
	deployment := p.buildNginxDeployment(
		objectMeta,
		nProxyCfg,
		ngxIncludesConfigMapName,
		ngxAgentConfigMapName,
		ports,
		selectorLabels,
		agentTLSSecretName,
		dockerSecretNames,
		jwtSecretName,
		caSecretName,
		clientSSLSecretName,
	)

	// order to install resources:
	// secrets
	// configmaps
	// serviceaccount
	// role/binding (if openshift)
	// service
	// deployment/daemonset

	objects := make([]client.Object, 0, len(configmaps)+len(secrets)+len(openshiftObjs)+3)
	objects = append(objects, secrets...)
	objects = append(objects, configmaps...)
	objects = append(objects, serviceAccount)
	if p.isOpenshift {
		objects = append(objects, openshiftObjs...)
	}
	objects = append(objects, service, deployment)

	return objects, err
}

// buildNginxSecrets duplicates the user-provided secrets (agent TLS, docker registry, and
// the NGINX Plus usage secrets) under the new per-Gateway names and metadata.
func (p *NginxProvisioner) buildNginxSecrets(
	objectMeta metav1.ObjectMeta,
	agentTLSSecretName string,
	dockerSecretNames map[string]string,
	jwtSecretName string,
	caSecretName string,
	clientSSLSecretName string,
) ([]client.Object, error) {
	var secrets []client.Object
	var errs []error

	if agentTLSSecretName != "" {
		newSecret, err := p.getAndUpdateSecret(
			p.cfg.AgentTLSSecretName,
			metav1.ObjectMeta{
				Name:        agentTLSSecretName,
				Namespace:   objectMeta.Namespace,
				Labels:      objectMeta.Labels,
				Annotations: objectMeta.Annotations,
			},
			corev1.SecretTypeTLS,
		)
		if err != nil {
			errs = append(errs, err)
		} else {
			secrets = append(secrets, newSecret)
		}
	}

	// dockerSecretNames is a map, so this ranges in random order; the sort below restores
	// determinism.
	for newName, origName := range dockerSecretNames {
		newSecret, err := p.getAndUpdateSecret(
			origName,
			metav1.ObjectMeta{
				Name:        newName,
				Namespace:   objectMeta.Namespace,
				Labels:      objectMeta.Labels,
				Annotations: objectMeta.Annotations,
			},
			corev1.SecretTypeDockerConfigJson,
		)
		if err != nil {
			errs = append(errs, err)
		} else {
			secrets = append(secrets, newSecret)
		}
	}

	// need to sort secrets so every time buildNginxSecrets is called it will generate the exact same
	// array of secrets. This is needed to satisfy deterministic results of the method.
	// (The Plus secrets appended after this point are added in a fixed order, so sorting
	// only the map-derived entries here keeps the full result deterministic.)
	sort.Slice(secrets, func(i, j int) bool {
		return secrets[i].GetName() < secrets[j].GetName()
	})

	if jwtSecretName != "" {
		newSecret, err := p.getAndUpdateSecret(
			p.cfg.PlusUsageConfig.SecretName,
			metav1.ObjectMeta{
				Name:        jwtSecretName,
				Namespace:   objectMeta.Namespace,
				Labels:      objectMeta.Labels,
				Annotations: objectMeta.Annotations,
			},
			corev1.SecretTypeOpaque,
		)
		if err != nil {
			errs = append(errs, err)
		} else {
			secrets = append(secrets, newSecret)
		}
	}

	if caSecretName != "" {
		newSecret, err := p.getAndUpdateSecret(
			p.cfg.PlusUsageConfig.CASecretName,
			metav1.ObjectMeta{
				Name:        caSecretName,
				Namespace:   objectMeta.Namespace,
				Labels:      objectMeta.Labels,
				Annotations: objectMeta.Annotations,
			},
			corev1.SecretTypeOpaque,
		)
		if err != nil {
			errs = append(errs, err)
		} else {
			secrets = append(secrets, newSecret)
		}
	}

	if clientSSLSecretName != "" {
		newSecret, err := p.getAndUpdateSecret(
			p.cfg.PlusUsageConfig.ClientSSLSecretName,
			metav1.ObjectMeta{
				Name:        clientSSLSecretName,
				Namespace:   objectMeta.Namespace,
				Labels:      objectMeta.Labels,
				Annotations: objectMeta.Annotations,
			},
			corev1.SecretTypeTLS,
		)
		if err != nil {
			errs = append(errs, err)
		} else {
			secrets = append(secrets, newSecret)
		}
	}

	return secrets, errors.Join(errs...)
+} + +func (p *NginxProvisioner) getAndUpdateSecret( + name string, + newObjectMeta metav1.ObjectMeta, + secretType corev1.SecretType, +) (*corev1.Secret, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + key := types.NamespacedName{Namespace: p.cfg.GatewayPodConfig.Namespace, Name: name} + secret := &corev1.Secret{} + if err := p.k8sClient.Get(ctx, key, secret); err != nil { + return nil, fmt.Errorf("error getting secret: %w", err) + } + + newSecret := &corev1.Secret{ + ObjectMeta: newObjectMeta, + Data: secret.Data, + Type: secretType, + } + + return newSecret, nil +} + +func (p *NginxProvisioner) buildNginxConfigMaps( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ngxIncludesConfigMapName string, + ngxAgentConfigMapName string, + caSecret bool, + clientSSLSecret bool, +) []client.Object { + var logging *ngfAPIv1alpha2.NginxLogging + if nProxyCfg != nil && nProxyCfg.Logging != nil { + logging = nProxyCfg.Logging + } + + logLevel := defaultNginxErrorLogLevel + if logging != nil && logging.ErrorLevel != nil { + logLevel = string(*nProxyCfg.Logging.ErrorLevel) + } + + mainFields := map[string]interface{}{ + "ErrorLevel": logLevel, + } + + bootstrapCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ngxIncludesConfigMapName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + Data: map[string]string{ + "main.conf": string(helpers.MustExecuteTemplate(mainTemplate, mainFields)), + }, + } + + if p.cfg.Plus { + mgmtFields := map[string]interface{}{ + "UsageEndpoint": p.cfg.PlusUsageConfig.Endpoint, + "SkipVerify": p.cfg.PlusUsageConfig.SkipVerify, + "UsageCASecret": caSecret, + "UsageClientSSLSecret": clientSSLSecret, + } + + bootstrapCM.Data["mgmt.conf"] = string(helpers.MustExecuteTemplate(mgmtTemplate, mgmtFields)) + } + + metricsPort := config.DefaultNginxMetricsPort + port, enableMetrics := 
graph.MetricsEnabledForNginxProxy(nProxyCfg) + if port != nil { + metricsPort = *port + } + + agentFields := map[string]interface{}{ + "Plus": p.cfg.Plus, + "ServiceName": p.cfg.GatewayPodConfig.ServiceName, + "Namespace": p.cfg.GatewayPodConfig.Namespace, + "EnableMetrics": enableMetrics, + "MetricsPort": metricsPort, + } + + if logging != nil && logging.AgentLevel != nil { + agentFields["LogLevel"] = *logging.AgentLevel + } + + agentCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ngxAgentConfigMapName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + Data: map[string]string{ + "nginx-agent.conf": string(helpers.MustExecuteTemplate(agentTemplate, agentFields)), + }, + } + + return []client.Object{bootstrapCM, agentCM} +} + +func (p *NginxProvisioner) buildOpenshiftObjects(objectMeta metav1.ObjectMeta) []client.Object { + role := &rbacv1.Role{ + ObjectMeta: objectMeta, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{p.cfg.NGINXSCCName}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }, + }, + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: objectMeta, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: objectMeta.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: objectMeta.Name, + Namespace: objectMeta.Namespace, + }, + }, + } + + return []client.Object{role, roleBinding} +} + +func buildNginxService( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ports map[int32]struct{}, + selectorLabels map[string]string, +) *corev1.Service { + var serviceCfg ngfAPIv1alpha2.ServiceSpec + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil && nProxyCfg.Kubernetes.Service != nil { + serviceCfg = *nProxyCfg.Kubernetes.Service + } + + serviceType := defaultServiceType + if serviceCfg.ServiceType != nil { 
		serviceType = corev1.ServiceType(*serviceCfg.ServiceType)
	}

	// ExternalTrafficPolicy is only meaningful for NodePort/LoadBalancer services,
	// so it is left as the zero value for ClusterIP.
	var servicePolicy corev1.ServiceExternalTrafficPolicyType
	if serviceType != corev1.ServiceTypeClusterIP {
		servicePolicy = defaultServicePolicy
		if serviceCfg.ExternalTrafficPolicy != nil {
			servicePolicy = corev1.ServiceExternalTrafficPolicy(*serviceCfg.ExternalTrafficPolicy)
		}
	}

	servicePorts := make([]corev1.ServicePort, 0, len(ports))
	for port := range ports {
		servicePort := corev1.ServicePort{
			Name:       fmt.Sprintf("port-%d", port),
			Port:       port,
			TargetPort: intstr.FromInt32(port),
		}

		// Apply a user-configured NodePort for this listener port, if one was set.
		if serviceType != corev1.ServiceTypeClusterIP {
			for _, nodePort := range serviceCfg.NodePorts {
				if nodePort.ListenerPort == port {
					servicePort.NodePort = nodePort.Port
				}
			}
		}

		servicePorts = append(servicePorts, servicePort)
	}

	// need to sort ports so every time buildNginxService is called it will generate the exact same
	// array of ports. This is needed to satisfy deterministic results of the method.
	sort.Slice(servicePorts, func(i, j int) bool {
		return servicePorts[i].Port < servicePorts[j].Port
	})

	svc := &corev1.Service{
		ObjectMeta: objectMeta,
		Spec: corev1.ServiceSpec{
			Type:                  serviceType,
			Ports:                 servicePorts,
			ExternalTrafficPolicy: servicePolicy,
			Selector:              selectorLabels,
		},
	}

	if serviceCfg.LoadBalancerIP != nil {
		svc.Spec.LoadBalancerIP = *serviceCfg.LoadBalancerIP
	}
	if serviceCfg.LoadBalancerClass != nil {
		svc.Spec.LoadBalancerClass = serviceCfg.LoadBalancerClass
	}
	if serviceCfg.LoadBalancerSourceRanges != nil {
		svc.Spec.LoadBalancerSourceRanges = serviceCfg.LoadBalancerSourceRanges
	}

	return svc
}

// buildNginxDeployment wraps the nginx pod template in a Deployment, applying any
// replica count from the effective NginxProxy configuration.
func (p *NginxProvisioner) buildNginxDeployment(
	objectMeta metav1.ObjectMeta,
	nProxyCfg *graph.EffectiveNginxProxy,
	ngxIncludesConfigMapName string,
	ngxAgentConfigMapName string,
	ports map[int32]struct{},
	selectorLabels map[string]string,
	agentTLSSecretName string,
	dockerSecretNames map[string]string,
	jwtSecretName string,
	caSecretName string,
	clientSSLSecretName string,
) client.Object {
	podTemplateSpec := p.buildNginxPodTemplateSpec(
		objectMeta,
		nProxyCfg,
		ngxIncludesConfigMapName,
		ngxAgentConfigMapName,
		ports,
		agentTLSSecretName,
		dockerSecretNames,
		jwtSecretName,
		caSecretName,
		clientSSLSecretName,
	)

	var object client.Object
	deployment := &appsv1.Deployment{
		ObjectMeta: objectMeta,
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: selectorLabels,
			},
			Template: podTemplateSpec,
		},
	}

	var deploymentCfg ngfAPIv1alpha2.DeploymentSpec
	if nProxyCfg != nil && nProxyCfg.Kubernetes != nil && nProxyCfg.Kubernetes.Deployment != nil {
		deploymentCfg = *nProxyCfg.Kubernetes.Deployment
	}

	if deploymentCfg.Replicas != nil {
		deployment.Spec.Replicas = deploymentCfg.Replicas
	}

	object = deployment

	return object
}

//nolint:gocyclo // will refactor at some point
func (p *NginxProvisioner)
buildNginxPodTemplateSpec( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ngxIncludesConfigMapName string, + ngxAgentConfigMapName string, + ports map[int32]struct{}, + agentTLSSecretName string, + dockerSecretNames map[string]string, + jwtSecretName string, + caSecretName string, + clientSSLSecretName string, +) corev1.PodTemplateSpec { + containerPorts := make([]corev1.ContainerPort, 0, len(ports)) + for port := range ports { + containerPort := corev1.ContainerPort{ + Name: fmt.Sprintf("port-%d", port), + ContainerPort: port, + } + containerPorts = append(containerPorts, containerPort) + } + + podAnnotations := make(map[string]string) + maps.Copy(podAnnotations, objectMeta.Annotations) + + metricsPort := config.DefaultNginxMetricsPort + if port, enabled := graph.MetricsEnabledForNginxProxy(nProxyCfg); enabled { + if port != nil { + metricsPort = *port + } + + containerPorts = append(containerPorts, corev1.ContainerPort{ + Name: "metrics", + ContainerPort: metricsPort, + }) + + podAnnotations["prometheus.io/scrape"] = "true" + podAnnotations["prometheus.io/port"] = strconv.Itoa(int(metricsPort)) + } + + // need to sort ports so everytime buildNginxPodTemplateSpec is called it will generate the exact same + // array of ports. This is needed to satisfy deterministic results of the method. 
+ sort.Slice(containerPorts, func(i, j int) bool { + return containerPorts[i].ContainerPort < containerPorts[j].ContainerPort + }) + + image, pullPolicy := p.buildImage(nProxyCfg) + tokenAudience := fmt.Sprintf("%s.%s.svc", p.cfg.GatewayPodConfig.ServiceName, p.cfg.GatewayPodConfig.Namespace) + + spec := corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: objectMeta.Labels, + Annotations: podAnnotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: image, + ImagePullPolicy: pullPolicy, + Ports: containerPorts, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: helpers.GetPointer(true), + RunAsGroup: helpers.GetPointer[int64](1001), + RunAsUser: helpers.GetPointer[int64](101), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/etc/nginx-agent", Name: "nginx-agent"}, + {MountPath: "/var/run/secrets/ngf", Name: "nginx-agent-tls"}, + {MountPath: "/var/run/secrets/ngf/serviceaccount", Name: "token"}, + {MountPath: "/var/log/nginx-agent", Name: "nginx-agent-log"}, + {MountPath: "/var/lib/nginx-agent", Name: "nginx-agent-lib"}, + {MountPath: "/etc/nginx/conf.d", Name: "nginx-conf"}, + {MountPath: "/etc/nginx/stream-conf.d", Name: "nginx-stream-conf"}, + {MountPath: "/etc/nginx/main-includes", Name: "nginx-main-includes"}, + {MountPath: "/etc/nginx/secrets", Name: "nginx-secrets"}, + {MountPath: "/var/run/nginx", Name: "nginx-run"}, + {MountPath: "/var/cache/nginx", Name: "nginx-cache"}, + {MountPath: "/etc/nginx/includes", Name: "nginx-includes"}, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init", + Image: p.cfg.GatewayPodConfig.Image, + ImagePullPolicy: pullPolicy, + Command: []string{ + "/usr/bin/gateway", + "initialize", + "--source", 
"/agent/nginx-agent.conf", + "--destination", "/etc/nginx-agent", + "--source", "/includes/main.conf", + "--destination", "/etc/nginx/main-includes", + }, + Env: []corev1.EnvVar{ + { + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/agent", Name: "nginx-agent-config"}, + {MountPath: "/etc/nginx-agent", Name: "nginx-agent"}, + {MountPath: "/includes", Name: "nginx-includes-bootstrap"}, + {MountPath: "/etc/nginx/main-includes", Name: "nginx-main-includes"}, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: helpers.GetPointer(true), + RunAsGroup: helpers.GetPointer[int64](1001), + RunAsUser: helpers.GetPointer[int64](101), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + ImagePullSecrets: []corev1.LocalObjectReference{}, + ServiceAccountName: objectMeta.Name, + SecurityContext: &corev1.PodSecurityContext{ + FSGroup: helpers.GetPointer[int64](1001), + RunAsNonRoot: helpers.GetPointer(true), + }, + Volumes: []corev1.Volume{ + { + Name: "token", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Path: "token", + Audience: tokenAudience, + }, + }, + }, + }, + }, + }, + {Name: "nginx-agent", VolumeSource: emptyDirVolumeSource}, + { + Name: "nginx-agent-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ngxAgentConfigMapName, + }, + }, + }, + }, + { + Name: "nginx-agent-tls", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: agentTLSSecretName, + }, + }, + }, + {Name: "nginx-agent-log", 
VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-agent-lib", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-conf", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-stream-conf", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-main-includes", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-secrets", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-run", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-cache", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-includes", VolumeSource: emptyDirVolumeSource}, + { + Name: "nginx-includes-bootstrap", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ngxIncludesConfigMapName, + }, + }, + }, + }, + }, + }, + } + + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil { + var podSpec *ngfAPIv1alpha2.PodSpec + var containerSpec *ngfAPIv1alpha2.ContainerSpec + if nProxyCfg.Kubernetes.Deployment != nil { + podSpec = &nProxyCfg.Kubernetes.Deployment.Pod + containerSpec = &nProxyCfg.Kubernetes.Deployment.Container + } + + if podSpec != nil { + spec.Spec.TerminationGracePeriodSeconds = podSpec.TerminationGracePeriodSeconds + spec.Spec.Affinity = podSpec.Affinity + spec.Spec.NodeSelector = podSpec.NodeSelector + spec.Spec.Tolerations = podSpec.Tolerations + spec.Spec.Volumes = append(spec.Spec.Volumes, podSpec.Volumes...) + spec.Spec.TopologySpreadConstraints = podSpec.TopologySpreadConstraints + } + + if containerSpec != nil { + container := spec.Spec.Containers[0] + if containerSpec.Resources != nil { + container.Resources = *containerSpec.Resources + } + container.Lifecycle = containerSpec.Lifecycle + container.VolumeMounts = append(container.VolumeMounts, containerSpec.VolumeMounts...) 
+ + if containerSpec.Debug != nil && *containerSpec.Debug { + container.Command = append(container.Command, "/agent/entrypoint.sh") + container.Args = append(container.Args, "debug") + } + spec.Spec.Containers[0] = container + } + } + + for name := range dockerSecretNames { + ref := corev1.LocalObjectReference{Name: name} + spec.Spec.ImagePullSecrets = append(spec.Spec.ImagePullSecrets, ref) + } + + // need to sort secret names so everytime buildNginxPodTemplateSpec is called it will generate the exact same + // array of secrets. This is needed to satisfy deterministic results of the method. + sort.Slice(spec.Spec.ImagePullSecrets, func(i, j int) bool { + return spec.Spec.ImagePullSecrets[i].Name < spec.Spec.ImagePullSecrets[j].Name + }) + + if p.cfg.Plus { + initCmd := spec.Spec.InitContainers[0].Command + initCmd = append(initCmd, + "--source", "/includes/mgmt.conf", "--destination", "/etc/nginx/main-includes", "--nginx-plus") + spec.Spec.InitContainers[0].Command = initCmd + + volumeMounts := spec.Spec.Containers[0].VolumeMounts + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-lib", + MountPath: "/var/lib/nginx/state", + }) + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-lib", + VolumeSource: emptyDirVolumeSource, + }) + + if jwtSecretName != "" { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-plus-license", + MountPath: "/etc/nginx/license.jwt", + SubPath: "license.jwt", + }) + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-plus-license", + VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: jwtSecretName}}, + }) + } + if caSecretName != "" || clientSSLSecretName != "" { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-plus-usage-certs", + MountPath: "/etc/nginx/certs-bootstrap/", + }) + + sources := []corev1.VolumeProjection{} + + if caSecretName != "" { + sources = append(sources, 
corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: caSecretName}, + }, + }) + } + + if clientSSLSecretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: clientSSLSecretName}, + }, + }) + } + + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-plus-usage-certs", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + } + + spec.Spec.Containers[0].VolumeMounts = volumeMounts + } + + return spec +} + +func (p *NginxProvisioner) buildImage(nProxyCfg *graph.EffectiveNginxProxy) (string, corev1.PullPolicy) { + image := defaultNginxImagePath + tag := p.cfg.GatewayPodConfig.Version + pullPolicy := defaultImagePullPolicy + + getImageAndPullPolicy := func(container ngfAPIv1alpha2.ContainerSpec) (string, string, corev1.PullPolicy) { + if container.Image != nil { + if container.Image.Repository != nil { + image = *container.Image.Repository + } + if container.Image.Tag != nil { + tag = *container.Image.Tag + } + if container.Image.PullPolicy != nil { + pullPolicy = corev1.PullPolicy(*container.Image.PullPolicy) + } + } + + return image, tag, pullPolicy + } + + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil { + if nProxyCfg.Kubernetes.Deployment != nil { + image, tag, pullPolicy = getImageAndPullPolicy(nProxyCfg.Kubernetes.Deployment.Container) + } + } + + return fmt.Sprintf("%s:%s", image, tag), pullPolicy +} + +// TODO(sberman): see about how this can be made more elegant. Maybe create some sort of Object factory +// that can better store/build all the objects we need, to reduce the amount of duplicate object lists that we +// have everywhere. 
+func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName types.NamespacedName) []client.Object { + // order to delete: + // deployment/daemonset + // service + // role/binding (if openshift) + // serviceaccount + // configmaps + // secrets + + objectMeta := metav1.ObjectMeta{ + Name: deploymentNSName.Name, + Namespace: deploymentNSName.Namespace, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: objectMeta, + } + service := &corev1.Service{ + ObjectMeta: objectMeta, + } + + objects := []client.Object{deployment, service} + + if p.isOpenshift { + role := &rbacv1.Role{ + ObjectMeta: objectMeta, + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: objectMeta, + } + objects = append(objects, role, roleBinding) + } + + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: objectMeta, + } + bootstrapCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(deploymentNSName.Name, nginxIncludesConfigMapNameSuffix), + Namespace: deploymentNSName.Namespace, + }, + } + agentCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix), + Namespace: deploymentNSName.Namespace, + }, + } + + objects = append(objects, serviceAccount, bootstrapCM, agentCM) + + agentTLSSecretName := controller.CreateNginxResourceName( + deploymentNSName.Name, + p.cfg.AgentTLSSecretName, + ) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, agentTLSSecret) + + for _, name := range p.cfg.NginxDockerSecretNames { + newName := controller.CreateNginxResourceName(deploymentNSName.Name, name) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: newName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, secret) + } + + var jwtSecretName, caSecretName, clientSSLSecretName 
string + if p.cfg.Plus { + if p.cfg.PlusUsageConfig.CASecretName != "" { + caSecretName = controller.CreateNginxResourceName(deploymentNSName.Name, p.cfg.PlusUsageConfig.CASecretName) + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, caSecret) + } + if p.cfg.PlusUsageConfig.ClientSSLSecretName != "" { + clientSSLSecretName = controller.CreateNginxResourceName( + deploymentNSName.Name, + p.cfg.PlusUsageConfig.ClientSSLSecretName, + ) + clientSSLSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientSSLSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, clientSSLSecret) + } + + jwtSecretName = controller.CreateNginxResourceName(deploymentNSName.Name, p.cfg.PlusUsageConfig.SecretName) + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, jwtSecret) + } + + return objects +} diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go new file mode 100644 index 0000000000..9225e8a0ce --- /dev/null +++ b/internal/mode/static/provisioner/objects_test.go @@ -0,0 +1,868 @@ +package provisioner + +import ( + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +func TestBuildNginxResourceObjects(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + Version: "1.0.0", + Image: "ngf-image", + }, + AgentTLSSecretName: agentTLSTestSecretName, + }, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + k8sClient: fakeClient, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + Spec: gatewayv1.GatewaySpec{ + Infrastructure: &gatewayv1.GatewayInfrastructure{ + Labels: map[gatewayv1.LabelKey]gatewayv1.LabelValue{ + "label": "value", + }, + Annotations: map[gatewayv1.AnnotationKey]gatewayv1.AnnotationValue{ + "annotation": "value", + }, + }, + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + { + Port: 8888, + }, + { + Port: 9999, + }, + }, + }, + } + + expLabels := 
map[string]string{ + "label": "value", + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + expAnnotations := map[string]string{ + "annotation": "value", + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects( + resourceName, + gateway, + &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Service: &ngfAPIv1alpha2.ServiceSpec{ + NodePorts: []ngfAPIv1alpha2.NodePort{ + { + Port: 30000, + ListenerPort: 80, + }, + { // ignored + Port: 31000, + ListenerPort: 789, + }, + }, + }, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(6)) + + validateLabelsAndAnnotations := func(obj client.Object) { + g.Expect(obj.GetLabels()).To(Equal(expLabels)) + g.Expect(obj.GetAnnotations()).To(Equal(expAnnotations)) + } + + validateMeta := func(obj client.Object) { + g.Expect(obj.GetName()).To(Equal(resourceName)) + validateLabelsAndAnnotations(obj) + } + + secretObj := objects[0] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, agentTLSTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("tls.crt")) + g.Expect(secret.Data["tls.crt"]).To(Equal([]byte("tls"))) + + cmObj := objects[1] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix))) + validateLabelsAndAnnotations(cm) + g.Expect(cm.Data).To(HaveKey("main.conf")) + g.Expect(cm.Data["main.conf"]).To(ContainSubstring("info")) + + cmObj = objects[2] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix))) + 
validateLabelsAndAnnotations(cm) + g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("command:")) + + svcAcctObj := objects[3] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct) + + svcObj := objects[4] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc) + g.Expect(svc.Spec.Type).To(Equal(defaultServiceType)) + g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(defaultServicePolicy)) + + // service ports is sorted in ascending order by port number when we make the nginx object + g.Expect(svc.Spec.Ports).To(Equal([]corev1.ServicePort{ + { + Port: 80, + Name: "port-80", + TargetPort: intstr.FromInt(80), + NodePort: 30000, + }, + { + Port: 8888, + Name: "port-8888", + TargetPort: intstr.FromInt(8888), + }, + { + Port: 9999, + Name: "port-9999", + TargetPort: intstr.FromInt(9999), + }, + })) + + depObj := objects[5] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep) + + template := dep.Spec.Template + g.Expect(template.GetAnnotations()).To(HaveKey("prometheus.io/scrape")) + g.Expect(template.Spec.Containers).To(HaveLen(1)) + container := template.Spec.Containers[0] + + // container ports is sorted in ascending order by port number when we make the nginx object + g.Expect(container.Ports).To(Equal([]corev1.ContainerPort{ + { + ContainerPort: 80, + Name: "port-80", + }, + { + ContainerPort: 8888, + Name: "port-8888", + }, + { + ContainerPort: config.DefaultNginxMetricsPort, + Name: "metrics", + }, + { + ContainerPort: 9999, + Name: "port-9999", + }, + })) + + g.Expect(container.Image).To(Equal(fmt.Sprintf("%s:1.0.0", defaultNginxImagePath))) + g.Expect(container.ImagePullPolicy).To(Equal(defaultImagePullPolicy)) + + g.Expect(template.Spec.InitContainers).To(HaveLen(1)) + initContainer := template.Spec.InitContainers[0] + + g.Expect(initContainer.Image).To(Equal("ngf-image")) + 
g.Expect(initContainer.ImagePullPolicy).To(Equal(defaultImagePullPolicy)) +} + +func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + Version: "1.0.0", + }, + AgentTLSSecretName: agentTLSTestSecretName, + }, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + k8sClient: fakeClient, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + nProxyCfg := &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + Metrics: &ngfAPIv1alpha2.Metrics{ + Port: helpers.GetPointer[int32](8080), + }, + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Service: &ngfAPIv1alpha2.ServiceSpec{ + ServiceType: helpers.GetPointer(ngfAPIv1alpha2.ServiceTypeNodePort), + ExternalTrafficPolicy: helpers.GetPointer(ngfAPIv1alpha2.ExternalTrafficPolicyCluster), + LoadBalancerIP: helpers.GetPointer("1.2.3.4"), + LoadBalancerClass: helpers.GetPointer("myLoadBalancerClass"), + LoadBalancerSourceRanges: []string{"5.6.7.8"}, + }, + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](3), + Pod: ngfAPIv1alpha2.PodSpec{ + TerminationGracePeriodSeconds: helpers.GetPointer[int64](25), + }, + Container: ngfAPIv1alpha2.ContainerSpec{ + Image: &ngfAPIv1alpha2.Image{ + Repository: helpers.GetPointer("nginx-repo"), + Tag: helpers.GetPointer("1.1.1"), + PullPolicy: 
helpers.GetPointer(ngfAPIv1alpha2.PullAlways), + }, + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.Quantity{Format: "100m"}, + }, + }, + }, + }, + }, + } + + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(6)) + + cmObj := objects[1] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("main.conf")) + g.Expect(cm.Data["main.conf"]).To(ContainSubstring("debug")) + + cmObj = objects[2] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("level: debug")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("port: 8080")) + + svcObj := objects[4] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(corev1.ServiceExternalTrafficPolicyTypeCluster)) + g.Expect(svc.Spec.LoadBalancerIP).To(Equal("1.2.3.4")) + g.Expect(*svc.Spec.LoadBalancerClass).To(Equal("myLoadBalancerClass")) + g.Expect(svc.Spec.LoadBalancerSourceRanges).To(Equal([]string{"5.6.7.8"})) + + depObj := objects[5] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + template := dep.Spec.Template + g.Expect(*template.Spec.TerminationGracePeriodSeconds).To(Equal(int64(25))) + + container := template.Spec.Containers[0] + + g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{ + ContainerPort: 8080, + Name: "metrics", + })) + + g.Expect(container.Image).To(Equal("nginx-repo:1.1.1")) + g.Expect(container.ImagePullPolicy).To(Equal(corev1.PullAlways)) + g.Expect(container.Resources.Limits).To(HaveKey(corev1.ResourceCPU)) + g.Expect(container.Resources.Limits[corev1.ResourceCPU].Format).To(Equal(resource.Format("100m"))) +} + +func TestBuildNginxResourceObjects_Plus(t 
*testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"license.jwt": []byte("jwt")}, + } + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"ca.crt": []byte("ca")}, + } + clientSSLSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + + fakeClient := fake.NewFakeClient(agentTLSSecret, jwtSecret, caSecret, clientSSLSecret) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + Endpoint: "test.com", + SkipVerify: true, + }, + AgentTLSSecretName: agentTLSTestSecretName, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + Spec: gatewayv1.GatewaySpec{ + Infrastructure: &gatewayv1.GatewayInfrastructure{ + Labels: map[gatewayv1.LabelKey]gatewayv1.LabelValue{ + "label": "value", + }, + Annotations: map[gatewayv1.AnnotationKey]gatewayv1.AnnotationValue{ + "annotation": "value", + }, + }, + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + 
g.Expect(objects).To(HaveLen(9)) + + expLabels := map[string]string{ + "label": "value", + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + expAnnotations := map[string]string{ + "annotation": "value", + } + + secretObj := objects[1] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, jwtTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("license.jwt")) + g.Expect(secret.Data["license.jwt"]).To(Equal([]byte("jwt"))) + + secretObj = objects[2] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, caTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("ca.crt")) + g.Expect(secret.Data["ca.crt"]).To(Equal([]byte("ca"))) + + secretObj = objects[3] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, clientTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("tls.crt")) + g.Expect(secret.Data["tls.crt"]).To(Equal([]byte("tls"))) + + cmObj := objects[4] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("mgmt.conf")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("usage_report endpoint=test.com;")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_verify off;")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_trusted_certificate")) + 
g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate_key")) + + cmObj = objects[5] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("api-action")) + + depObj := objects[8] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + template := dep.Spec.Template + container := template.Spec.Containers[0] + initContainer := template.Spec.InitContainers[0] + + g.Expect(initContainer.Command).To(ContainElement("/includes/mgmt.conf")) + g.Expect(container.VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "nginx-plus-license", + MountPath: "/etc/nginx/license.jwt", + SubPath: "license.jwt", + })) + g.Expect(container.VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "nginx-plus-usage-certs", + MountPath: "/etc/nginx/certs-bootstrap/", + })) +} + +func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + + dockerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"data": []byte("docker")}, + } + + dockerSecretRegistry1Name := dockerTestSecretName + "-registry1" + dockerSecretRegistry1 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerSecretRegistry1Name, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"data": []byte("docker-registry1")}, + } + + dockerSecretRegistry2Name := dockerTestSecretName + "-registry2" + dockerSecretRegistry2 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerSecretRegistry2Name, + Namespace: ngfNamespace, + }, + Data: 
map[string][]byte{"data": []byte("docker-registry2")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret, dockerSecret, dockerSecretRegistry1, dockerSecretRegistry2) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + NginxDockerSecretNames: []string{dockerTestSecretName, dockerSecretRegistry1Name, dockerSecretRegistry2Name}, + AgentTLSSecretName: agentTLSTestSecretName, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(9)) + + expLabels := map[string]string{ + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + + secretObj := objects[0] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, agentTLSTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + // the (docker-only) secret order in the object list is sorted by secret name + + secretObj = objects[1] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + registry1SecretObj := objects[2] + secret, ok = registry1SecretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry1Name))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + registry2SecretObj := 
objects[3] + secret, ok = registry2SecretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry2Name))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + depObj := objects[8] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + // imagePullSecrets is sorted by name when we make the nginx object + g.Expect(dep.Spec.Template.Spec.ImagePullSecrets).To(Equal([]corev1.LocalObjectReference{ + { + Name: controller.CreateNginxResourceName(resourceName, dockerTestSecretName), + }, + { + Name: controller.CreateNginxResourceName(resourceName, dockerSecretRegistry1Name), + }, + { + Name: controller.CreateNginxResourceName(resourceName, dockerSecretRegistry2Name), + }, + })) +} + +func TestBuildNginxResourceObjects_OpenShift(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + + provisioner := &NginxProvisioner{ + isOpenshift: true, + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + AgentTLSSecretName: agentTLSTestSecretName, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(8)) + + expLabels := map[string]string{ + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + + roleObj := objects[4] + role, ok := 
roleObj.(*rbacv1.Role) + g.Expect(ok).To(BeTrue()) + g.Expect(role.GetLabels()).To(Equal(expLabels)) + + roleBindingObj := objects[5] + roleBinding, ok := roleBindingObj.(*rbacv1.RoleBinding) + g.Expect(ok).To(BeTrue()) + g.Expect(roleBinding.GetLabels()).To(Equal(expLabels)) +} + +func TestGetAndUpdateSecret_NotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeClient := fake.NewFakeClient() + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: "default", + }, + }, + k8sClient: fakeClient, + } + + _, err := provisioner.getAndUpdateSecret( + "non-existent-secret", + metav1.ObjectMeta{ + Name: "new-secret", + Namespace: "default", + }, + corev1.SecretTypeOpaque, + ) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("error getting secret")) +} + +func TestBuildNginxResourceObjectsForDeletion(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{} + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(6)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + depObj := objects[0] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep, deploymentNSName.Name) + + svcObj := objects[1] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc, deploymentNSName.Name) + + svcAcctObj := objects[2] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct, deploymentNSName.Name) + + cmObj := objects[3] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, 
nginxIncludesConfigMapNameSuffix)) + + cmObj = objects[4] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix)) +} + +func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{ + cfg: Config{ + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + }, + NginxDockerSecretNames: []string{dockerTestSecretName}, + AgentTLSSecretName: agentTLSTestSecretName, + }, + } + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(10)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + depObj := objects[0] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep, deploymentNSName.Name) + + svcObj := objects[1] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc, deploymentNSName.Name) + + svcAcctObj := objects[2] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct, deploymentNSName.Name) + + cmObj := objects[3] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxIncludesConfigMapNameSuffix)) + + cmObj = objects[4] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix)) + + secretObj := objects[5] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + 
validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.AgentTLSSecretName, + )) + + secretObj = objects[6] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.NginxDockerSecretNames[0], + )) + + secretObj = objects[7] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.PlusUsageConfig.CASecretName, + )) + + secretObj = objects[8] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.PlusUsageConfig.ClientSSLSecretName, + )) +} + +func TestBuildNginxResourceObjectsForDeletion_OpenShift(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{isOpenshift: true} + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(8)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + roleObj := objects[2] + role, ok := roleObj.(*rbacv1.Role) + g.Expect(ok).To(BeTrue()) + validateMeta(role, deploymentNSName.Name) + + roleBindingObj := objects[3] + roleBinding, ok := roleBindingObj.(*rbacv1.RoleBinding) + g.Expect(ok).To(BeTrue()) + validateMeta(roleBinding, deploymentNSName.Name) +} diff --git a/internal/mode/static/provisioner/openshift/openshift.go b/internal/mode/static/provisioner/openshift/openshift.go new file mode 100644 index 0000000000..3c89c4c988 --- /dev/null +++ b/internal/mode/static/provisioner/openshift/openshift.go @@ -0,0 +1,38 @@ +package openshift + +import ( + 
"fmt" + + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . APIChecker + +type APIChecker interface { + IsOpenshift(*rest.Config) (bool, error) +} + +type APICheckerImpl struct{} + +func (o *APICheckerImpl) IsOpenshift(config *rest.Config) (bool, error) { + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return false, fmt.Errorf("error creating discovery client: %w", err) + } + + apiList, err := discoveryClient.ServerGroups() + if err != nil { + return false, fmt.Errorf("error getting server groups: %w", err) + } + + for _, group := range apiList.Groups { + if group.Name == "security.openshift.io" { + return true, nil + } + } + + return false, nil +} diff --git a/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go b/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go new file mode 100644 index 0000000000..d1e108544d --- /dev/null +++ b/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package openshiftfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift" + "k8s.io/client-go/rest" +) + +type FakeAPIChecker struct { + IsOpenshiftStub func(*rest.Config) (bool, error) + isOpenshiftMutex sync.RWMutex + isOpenshiftArgsForCall []struct { + arg1 *rest.Config + } + isOpenshiftReturns struct { + result1 bool + result2 error + } + isOpenshiftReturnsOnCall map[int]struct { + result1 bool + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeAPIChecker) IsOpenshift(arg1 *rest.Config) (bool, error) { + fake.isOpenshiftMutex.Lock() + ret, specificReturn := fake.isOpenshiftReturnsOnCall[len(fake.isOpenshiftArgsForCall)] + fake.isOpenshiftArgsForCall = append(fake.isOpenshiftArgsForCall, struct { + arg1 *rest.Config + }{arg1}) + stub := fake.IsOpenshiftStub + fakeReturns := fake.isOpenshiftReturns + fake.recordInvocation("IsOpenshift", []interface{}{arg1}) + fake.isOpenshiftMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeAPIChecker) IsOpenshiftCallCount() int { + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + return len(fake.isOpenshiftArgsForCall) +} + +func (fake *FakeAPIChecker) IsOpenshiftCalls(stub func(*rest.Config) (bool, error)) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = stub +} + +func (fake *FakeAPIChecker) IsOpenshiftArgsForCall(i int) *rest.Config { + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + argsForCall := fake.isOpenshiftArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeAPIChecker) IsOpenshiftReturns(result1 bool, result2 error) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = nil + fake.isOpenshiftReturns = struct { + 
result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeAPIChecker) IsOpenshiftReturnsOnCall(i int, result1 bool, result2 error) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = nil + if fake.isOpenshiftReturnsOnCall == nil { + fake.isOpenshiftReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.isOpenshiftReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeAPIChecker) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeAPIChecker) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ openshift.APIChecker = new(FakeAPIChecker) diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go new file mode 100644 index 0000000000..0b6548b416 --- /dev/null +++ b/internal/mode/static/provisioner/provisioner.go @@ -0,0 +1,444 @@ +package provisioner + +import ( + "context" + "fmt" + "slices" + "strings" + "sync" + "time" + + "github.com/go-logr/logr" + "golang.org/x/text/cases" + "golang.org/x/text/language" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + 
"k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . Provisioner + +// Provisioner is an interface for triggering NGINX resources to be created/updated/deleted. +type Provisioner interface { + RegisterGateway(ctx context.Context, gateway *graph.Gateway, resourceName string) error +} + +// Config is the configuration for the Provisioner. +type Config struct { + GCName string + AgentTLSSecretName string + NGINXSCCName string + + DeploymentStore agent.DeploymentStorer + StatusQueue *status.Queue + GatewayPodConfig *config.GatewayPodConfig + PlusUsageConfig *config.UsageReportConfig + EventRecorder record.EventRecorder + Logger logr.Logger + NginxDockerSecretNames []string + + Plus bool +} + +// NginxProvisioner handles provisioning nginx kubernetes resources. +type NginxProvisioner struct { + store *store + k8sClient client.Client + // resourcesToDeleteOnStartup contains a list of Gateway names that no longer exist + // but have nginx resources tied to them that need to be deleted. 
+ resourcesToDeleteOnStartup []types.NamespacedName + baseLabelSelector metav1.LabelSelector + cfg Config + leader bool + isOpenshift bool + + lock sync.RWMutex +} + +var apiChecker openshift.APIChecker = &openshift.APICheckerImpl{} + +// NewNginxProvisioner returns a new instance of a Provisioner that will deploy nginx resources. +func NewNginxProvisioner( + ctx context.Context, + mgr manager.Manager, + cfg Config, +) (*NginxProvisioner, *events.EventLoop, error) { + var jwtSecretName, caSecretName, clientSSLSecretName string + if cfg.Plus && cfg.PlusUsageConfig != nil { + jwtSecretName = cfg.PlusUsageConfig.SecretName + caSecretName = cfg.PlusUsageConfig.CASecretName + clientSSLSecretName = cfg.PlusUsageConfig.ClientSSLSecretName + } + + store := newStore( + cfg.NginxDockerSecretNames, + cfg.AgentTLSSecretName, + jwtSecretName, + caSecretName, + clientSSLSecretName, + ) + + selector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + controller.AppInstanceLabel: cfg.GatewayPodConfig.InstanceName, + controller.AppManagedByLabel: controller.CreateNginxResourceName( + cfg.GatewayPodConfig.InstanceName, + cfg.GCName, + ), + }, + } + + isOpenshift, err := apiChecker.IsOpenshift(mgr.GetConfig()) + if err != nil { + cfg.Logger.Error(err, "could not determine if running in openshift, will not create Role/RoleBinding") + } + + provisioner := &NginxProvisioner{ + k8sClient: mgr.GetClient(), + store: store, + baseLabelSelector: selector, + resourcesToDeleteOnStartup: []types.NamespacedName{}, + cfg: cfg, + isOpenshift: isOpenshift, + } + + handler, err := newEventHandler(store, provisioner, selector, cfg.GCName) + if err != nil { + return nil, nil, fmt.Errorf("error initializing eventHandler: %w", err) + } + + eventLoop, err := newEventLoop( + ctx, + mgr, + handler, + cfg.Logger, + selector, + cfg.GatewayPodConfig.Namespace, + cfg.NginxDockerSecretNames, + cfg.AgentTLSSecretName, + cfg.PlusUsageConfig, + isOpenshift, + ) + if err != nil { + return nil, nil, err + } 
+ + return provisioner, eventLoop, nil +} + +// Enable is called when the Pod becomes leader and allows the provisioner to manage resources. +func (p *NginxProvisioner) Enable(ctx context.Context) { + p.lock.Lock() + p.leader = true + p.lock.Unlock() + + p.lock.RLock() + for _, gatewayNSName := range p.resourcesToDeleteOnStartup { + if err := p.deprovisionNginx(ctx, gatewayNSName); err != nil { + p.cfg.Logger.Error(err, "error deprovisioning nginx resources on startup") + } + } + p.lock.RUnlock() + + p.lock.Lock() + p.resourcesToDeleteOnStartup = []types.NamespacedName{} + p.lock.Unlock() +} + +// isLeader returns whether or not this provisioner is the leader. +func (p *NginxProvisioner) isLeader() bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.leader +} + +// setResourceToDelete is called when there are resources to delete, but this pod is not leader. +// Once it becomes leader, it will delete those resources. +func (p *NginxProvisioner) setResourceToDelete(gatewayNSName types.NamespacedName) { + p.lock.Lock() + defer p.lock.Unlock() + + p.resourcesToDeleteOnStartup = append(p.resourcesToDeleteOnStartup, gatewayNSName) +} + +//nolint:gocyclo // will refactor at some point +func (p *NginxProvisioner) provisionNginx( + ctx context.Context, + resourceName string, + gateway *gatewayv1.Gateway, + objects []client.Object, +) error { + if !p.isLeader() { + return nil + } + + p.cfg.Logger.Info( + "Creating/Updating nginx resources", + "namespace", gateway.GetNamespace(), + "name", resourceName, + ) + + var agentConfigMapUpdated, deploymentCreated bool + var deploymentObj *appsv1.Deployment + for _, obj := range objects { + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + + var res controllerutil.OperationResult + if err := wait.PollUntilContextCancel( + createCtx, + 500*time.Millisecond, + true, /* poll immediately */ + func(ctx context.Context) (bool, error) { + var upsertErr error + res, upsertErr = controllerutil.CreateOrUpdate(ctx, 
p.k8sClient, obj, objectSpecSetter(obj)) + if upsertErr != nil { + if !apierrors.IsAlreadyExists(upsertErr) && !apierrors.IsConflict(upsertErr) { + return false, upsertErr + } + if apierrors.IsConflict(upsertErr) { + return false, nil + } + } + return true, nil + }, + ); err != nil { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "CreateOrUpdateFailed", + "Failed to create or update nginx resource: %s", + err.Error(), + ) + cancel() + return err + } + cancel() + + switch o := obj.(type) { + case *appsv1.Deployment: + deploymentObj = o + if res == controllerutil.OperationResultCreated { + deploymentCreated = true + } + case *corev1.ConfigMap: + if res == controllerutil.OperationResultUpdated && + strings.Contains(obj.GetName(), nginxAgentConfigMapNameSuffix) { + agentConfigMapUpdated = true + } + } + + if res != controllerutil.OperationResultCreated && res != controllerutil.OperationResultUpdated { + continue + } + + result := cases.Title(language.English, cases.Compact).String(string(res)) + p.cfg.Logger.V(1).Info( + fmt.Sprintf("%s nginx %s", result, obj.GetObjectKind().GroupVersionKind().Kind), + "namespace", gateway.GetNamespace(), + "name", resourceName, + ) + p.store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), obj) + } + + // if agent configmap was updated, then we'll need to restart the deployment + if agentConfigMapUpdated && !deploymentCreated && deploymentObj != nil { + updateCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + p.cfg.Logger.V(1).Info( + "Restarting nginx deployment after agent configmap update", + "name", deploymentObj.GetName(), + "namespace", deploymentObj.GetNamespace(), + ) + + if deploymentObj.Spec.Template.Annotations == nil { + deploymentObj.Annotations = make(map[string]string) + } + deploymentObj.Spec.Template.Annotations[controller.RestartedAnnotation] = time.Now().Format(time.RFC3339) + + if err := p.k8sClient.Update(updateCtx, deploymentObj); err != nil && 
!apierrors.IsConflict(err) { + p.cfg.EventRecorder.Eventf( + deploymentObj, + corev1.EventTypeWarning, + "RestartFailed", + "Failed to restart nginx deployment after agent config update: %s", + err.Error(), + ) + return err + } + } + + return nil +} + +func (p *NginxProvisioner) reprovisionNginx( + ctx context.Context, + resourceName string, + gateway *gatewayv1.Gateway, + nProxyCfg *graph.EffectiveNginxProxy, +) error { + if !p.isLeader() { + return nil + } + + objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + if err != nil { + p.cfg.Logger.Error(err, "error provisioning some nginx resources") + } + + p.cfg.Logger.Info( + "Re-creating nginx resources", + "namespace", gateway.GetNamespace(), + "name", resourceName, + ) + + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for _, obj := range objects { + if err := p.k8sClient.Create(createCtx, obj); err != nil && !apierrors.IsAlreadyExists(err) { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "CreateFailed", + "Failed to create nginx resource: %s", + err.Error(), + ) + return err + } + } + + return nil +} + +func (p *NginxProvisioner) deprovisionNginx(ctx context.Context, gatewayNSName types.NamespacedName) error { + deploymentNSName := types.NamespacedName{ + Name: controller.CreateNginxResourceName(gatewayNSName.Name, p.cfg.GCName), + Namespace: gatewayNSName.Namespace, + } + + if p.isLeader() { + p.cfg.Logger.Info( + "Removing nginx resources for Gateway", + "name", gatewayNSName.Name, + "namespace", gatewayNSName.Namespace, + ) + + objects := p.buildNginxResourceObjectsForDeletion(deploymentNSName) + + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for _, obj := range objects { + if err := p.k8sClient.Delete(createCtx, obj); err != nil && !apierrors.IsNotFound(err) { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "DeleteFailed", + "Failed to delete nginx resource: %s", + 
err.Error(), + ) + return err + } + } + } + + p.store.deleteResourcesForGateway(gatewayNSName) + p.cfg.DeploymentStore.Remove(deploymentNSName) + + return nil +} + +// isUserSecret determines if the provided secret name is a special user secret, +// for example an NGINX docker registry secret or NGINX Plus secret. +func (p *NginxProvisioner) isUserSecret(name string) bool { + if name == p.cfg.AgentTLSSecretName { + return true + } + + if slices.Contains(p.cfg.NginxDockerSecretNames, name) { + return true + } + + if p.cfg.PlusUsageConfig != nil { + return name == p.cfg.PlusUsageConfig.SecretName || + name == p.cfg.PlusUsageConfig.CASecretName || + name == p.cfg.PlusUsageConfig.ClientSSLSecretName + } + + return false +} + +func (p *NginxProvisioner) deleteSecret(ctx context.Context, secretNSName types.NamespacedName) error { + if !p.isLeader() { + return nil + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNSName.Name, + Namespace: secretNSName.Namespace, + }, + } + + if err := p.k8sClient.Delete(ctx, secret); err != nil && !apierrors.IsNotFound(err) { + return err + } + + return nil +} + +// RegisterGateway is called by the main event handler when a Gateway API resource event occurs +// and the graph is built. The provisioner updates the Gateway config in the store and then: +// - If it's a valid Gateway, create or update nginx resources associated with the Gateway, if necessary. +// - If it's an invalid Gateway, delete the associated nginx resources. 
+func (p *NginxProvisioner) RegisterGateway( + ctx context.Context, + gateway *graph.Gateway, + resourceName string, +) error { + if !p.isLeader() { + return nil + } + + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if updated := p.store.registerResourceInGatewayConfig(gatewayNSName, gateway); !updated { + return nil + } + + if gateway.Valid { + objects, err := p.buildNginxResourceObjects(resourceName, gateway.Source, gateway.EffectiveNginxProxy) + if err != nil { + p.cfg.Logger.Error(err, "error building some nginx resources") + } + + if err := p.provisionNginx(ctx, resourceName, gateway.Source, objects); err != nil { + return fmt.Errorf("error provisioning nginx resources: %w", err) + } + } else { + if err := p.deprovisionNginx(ctx, gatewayNSName); err != nil { + return fmt.Errorf("error deprovisioning nginx resources: %w", err) + } + } + + return nil +} diff --git a/internal/mode/static/provisioner/provisioner_test.go b/internal/mode/static/provisioner/provisioner_test.go new file mode 100644 index 0000000000..d89caefade --- /dev/null +++ b/internal/mode/static/provisioner/provisioner_test.go @@ -0,0 +1,396 @@ +package provisioner + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/manager" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift/openshiftfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +const ( + agentTLSTestSecretName = "agent-tls-secret" + jwtTestSecretName = "jwt-secret" + caTestSecretName = "ca-secret" + clientTestSecretName = "client-secret" + dockerTestSecretName = "docker-secret" + ngfNamespace = "nginx-gateway" +) + +func createScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + + utilruntime.Must(gatewayv1.Install(scheme)) + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + + return scheme +} + +func expectResourcesToExist(g *WithT, k8sClient client.Client, nsName types.NamespacedName, plus bool) { + g.Expect(k8sClient.Get(context.TODO(), nsName, &appsv1.Deployment{})).To(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.Service{})).To(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.ServiceAccount{})).To(Succeed()) + + boostrapCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, 
nginxIncludesConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), boostrapCM, &corev1.ConfigMap{})).To(Succeed()) + + agentCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxAgentConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).To(Succeed()) + + agentTLSSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, agentTLSTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentTLSSecret, &corev1.Secret{})).To(Succeed()) + + if !plus { + return + } + + jwtSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, jwtTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), jwtSecret, &corev1.Secret{})).To(Succeed()) + + caSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, caTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), caSecret, &corev1.Secret{})).To(Succeed()) + + clientSSLSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, clientTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), clientSSLSecret, &corev1.Secret{})).To(Succeed()) + + dockerSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, dockerTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), dockerSecret, &corev1.Secret{})).To(Succeed()) +} + +func expectResourcesToNotExist(g *WithT, k8sClient client.Client, nsName types.NamespacedName) { + g.Expect(k8sClient.Get(context.TODO(), nsName, &appsv1.Deployment{})).ToNot(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.Service{})).ToNot(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, 
&corev1.ServiceAccount{})).ToNot(Succeed()) + + boostrapCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxIncludesConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), boostrapCM, &corev1.ConfigMap{})).ToNot(Succeed()) + + agentCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxAgentConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).ToNot(Succeed()) + + agentTLSSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, agentTLSTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentTLSSecret, &corev1.Secret{})).ToNot(Succeed()) + + jwtSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, jwtTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), jwtSecret, &corev1.Secret{})).ToNot(Succeed()) + + caSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, caTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), caSecret, &corev1.Secret{})).ToNot(Succeed()) + + clientSSLSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, clientTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), clientSSLSecret, &corev1.Secret{})).ToNot(Succeed()) + + dockerSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, dockerTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), dockerSecret, &corev1.Secret{})).ToNot(Succeed()) +} + +func defaultNginxProvisioner( + objects ...client.Object, +) (*NginxProvisioner, client.Client, *agentfakes.FakeDeploymentStorer) { + fakeClient := 
fake.NewClientBuilder().WithScheme(createScheme()).WithObjects(objects...).Build() + deploymentStore := &agentfakes.FakeDeploymentStorer{} + + return &NginxProvisioner{ + store: newStore( + []string{dockerTestSecretName}, + agentTLSTestSecretName, + jwtTestSecretName, + caTestSecretName, + clientTestSecretName, + ), + k8sClient: fakeClient, + cfg: Config{ + DeploymentStore: deploymentStore, + GatewayPodConfig: &config.GatewayPodConfig{ + InstanceName: "test-instance", + Namespace: ngfNamespace, + }, + Logger: logr.Discard(), + EventRecorder: &record.FakeRecorder{}, + GCName: "nginx", + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + }, + NginxDockerSecretNames: []string{dockerTestSecretName}, + AgentTLSSecretName: agentTLSTestSecretName, + }, + leader: true, + }, fakeClient, deploymentStore +} + +func TestNewNginxProvisioner(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + mgr, err := manager.New(&rest.Config{}, manager.Options{Scheme: createScheme()}) + g.Expect(err).ToNot(HaveOccurred()) + + cfg := Config{ + GCName: "test-gc", + GatewayPodConfig: &config.GatewayPodConfig{ + InstanceName: "test-instance", + }, + Logger: logr.Discard(), + } + + apiChecker = &openshiftfakes.FakeAPIChecker{} + provisioner, eventLoop, err := NewNginxProvisioner(context.TODO(), mgr, cfg) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(provisioner).NotTo(BeNil()) + g.Expect(eventLoop).NotTo(BeNil()) + + labelSelector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/managed-by": "test-instance-test-gc", + "app.kubernetes.io/instance": "test-instance", + }, + } + g.Expect(provisioner.baseLabelSelector).To(Equal(labelSelector)) +} + +func TestEnable(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx", + Namespace: "default", + }, + } + provisioner, 
fakeClient, _ := defaultNginxProvisioner(dep) + provisioner.setResourceToDelete(types.NamespacedName{Name: "gw", Namespace: "default"}) + provisioner.leader = false + + provisioner.Enable(context.TODO()) + g.Expect(provisioner.isLeader()).To(BeTrue()) + g.Expect(provisioner.resourcesToDeleteOnStartup).To(BeEmpty()) + expectResourcesToNotExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}) +} + +func TestRegisterGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + } + + objects := []client.Object{ + gateway.Source, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + }, + } + + provisioner, fakeClient, deploymentStore := defaultNginxProvisioner(objects...) 
+ + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, true) // plus + + // Call again, no updates so nothing should happen + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, true) // plus + + // Now set the Gateway to invalid, and expect a deprovision to occur + invalid := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: false, + } + g.Expect(provisioner.RegisterGateway(context.TODO(), invalid, "gw-nginx")).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}) + + resources := provisioner.store.getNginxResourcesForGateway(types.NamespacedName{Name: "gw", Namespace: "default"}) + g.Expect(resources).To(BeNil()) + + g.Expect(deploymentStore.RemoveCallCount()).To(Equal(1)) +} + +func TestNonLeaderProvisioner(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner, fakeClient, deploymentStore := defaultNginxProvisioner() + provisioner.leader = false + nsName := types.NamespacedName{Name: "gw-nginx", Namespace: "default"} + + g.Expect(provisioner.RegisterGateway(context.TODO(), nil, "gw-nginx")).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.provisionNginx(context.TODO(), "gw-nginx", nil, nil)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.reprovisionNginx(context.TODO(), "gw-nginx", nil, nil)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.deprovisionNginx(context.TODO(), nsName)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + g.Expect(deploymentStore.RemoveCallCount()).To(Equal(1)) +} 
+ +func TestProvisionerRestartsDeployment(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + }, + } + + // provision everything first + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + provisioner, fakeClient, _ := defaultNginxProvisioner(gateway.Source, agentTLSSecret) + provisioner.cfg.Plus = false + provisioner.cfg.NginxDockerSecretNames = nil + + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, false) // not plus + + // update agent config + updatedConfig := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelInfo), + }, + }, + } + g.Expect(provisioner.RegisterGateway(context.TODO(), updatedConfig, "gw-nginx")).To(Succeed()) + + // verify deployment was updated with the restart annotation + dep := &appsv1.Deployment{} + key := types.NamespacedName{Name: "gw-nginx", Namespace: "default"} + g.Expect(fakeClient.Get(context.TODO(), key, dep)).To(Succeed()) + + g.Expect(dep.Spec.Template.GetAnnotations()).To(HaveKey(controller.RestartedAnnotation)) +} diff --git a/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go b/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go new file mode 100644 index 
0000000000..b4359a1ceb --- /dev/null +++ b/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package provisionerfakes + +import ( + "context" + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +type FakeProvisioner struct { + RegisterGatewayStub func(context.Context, *graph.Gateway, string) error + registerGatewayMutex sync.RWMutex + registerGatewayArgsForCall []struct { + arg1 context.Context + arg2 *graph.Gateway + arg3 string + } + registerGatewayReturns struct { + result1 error + } + registerGatewayReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeProvisioner) RegisterGateway(arg1 context.Context, arg2 *graph.Gateway, arg3 string) error { + fake.registerGatewayMutex.Lock() + ret, specificReturn := fake.registerGatewayReturnsOnCall[len(fake.registerGatewayArgsForCall)] + fake.registerGatewayArgsForCall = append(fake.registerGatewayArgsForCall, struct { + arg1 context.Context + arg2 *graph.Gateway + arg3 string + }{arg1, arg2, arg3}) + stub := fake.RegisterGatewayStub + fakeReturns := fake.registerGatewayReturns + fake.recordInvocation("RegisterGateway", []interface{}{arg1, arg2, arg3}) + fake.registerGatewayMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeProvisioner) RegisterGatewayCallCount() int { + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + return len(fake.registerGatewayArgsForCall) +} + +func (fake *FakeProvisioner) RegisterGatewayCalls(stub func(context.Context, *graph.Gateway, string) error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = stub +} 
+ +func (fake *FakeProvisioner) RegisterGatewayArgsForCall(i int) (context.Context, *graph.Gateway, string) { + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + argsForCall := fake.registerGatewayArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeProvisioner) RegisterGatewayReturns(result1 error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = nil + fake.registerGatewayReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeProvisioner) RegisterGatewayReturnsOnCall(i int, result1 error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = nil + if fake.registerGatewayReturnsOnCall == nil { + fake.registerGatewayReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.registerGatewayReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeProvisioner) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeProvisioner) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ provisioner.Provisioner = new(FakeProvisioner) diff --git a/internal/mode/static/provisioner/setter.go b/internal/mode/static/provisioner/setter.go new file mode 100644 index 0000000000..eff556e434 --- /dev/null +++ 
b/internal/mode/static/provisioner/setter.go @@ -0,0 +1,133 @@ +package provisioner + +import ( + "maps" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// objectSpecSetter sets the spec of the provided object. This is used when creating or updating the object. +func objectSpecSetter(object client.Object) controllerutil.MutateFn { + switch obj := object.(type) { + case *appsv1.Deployment: + return deploymentSpecSetter(obj, obj.Spec, obj.ObjectMeta) + case *corev1.Service: + return serviceSpecSetter(obj, obj.Spec, obj.ObjectMeta) + case *corev1.ServiceAccount: + return serviceAccountSpecSetter(obj, obj.ObjectMeta) + case *corev1.ConfigMap: + return configMapSpecSetter(obj, obj.Data, obj.ObjectMeta) + case *corev1.Secret: + return secretSpecSetter(obj, obj.Data, obj.ObjectMeta) + case *rbacv1.Role: + return roleSpecSetter(obj, obj.Rules, obj.ObjectMeta) + case *rbacv1.RoleBinding: + return roleBindingSpecSetter(obj, obj.RoleRef, obj.Subjects, obj.ObjectMeta) + } + + return nil +} + +func deploymentSpecSetter( + deployment *appsv1.Deployment, + spec appsv1.DeploymentSpec, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + deployment.Labels = objectMeta.Labels + deployment.Annotations = objectMeta.Annotations + deployment.Spec = spec + return nil + } +} + +func serviceSpecSetter( + service *corev1.Service, + spec corev1.ServiceSpec, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + service.Labels = objectMeta.Labels + service.Annotations = objectMeta.Annotations + service.Spec = spec + return nil + } +} + +func serviceAccountSpecSetter( + serviceAccount *corev1.ServiceAccount, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + serviceAccount.Labels = 
objectMeta.Labels + serviceAccount.Annotations = objectMeta.Annotations + return nil + } +} + +func configMapSpecSetter( + configMap *corev1.ConfigMap, + data map[string]string, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + // this check ensures we don't trigger an unnecessary update to the agent ConfigMap + // and trigger a Deployment restart + if maps.Equal(configMap.Labels, objectMeta.Labels) && + maps.Equal(configMap.Annotations, objectMeta.Annotations) && + maps.Equal(configMap.Data, data) { + return nil + } + + configMap.Labels = objectMeta.Labels + configMap.Annotations = objectMeta.Annotations + configMap.Data = data + return nil + } +} + +func secretSpecSetter( + secret *corev1.Secret, + data map[string][]byte, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + secret.Labels = objectMeta.Labels + secret.Annotations = objectMeta.Annotations + secret.Data = data + return nil + } +} + +func roleSpecSetter( + role *rbacv1.Role, + rules []rbacv1.PolicyRule, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + role.Labels = objectMeta.Labels + role.Annotations = objectMeta.Annotations + role.Rules = rules + return nil + } +} + +func roleBindingSpecSetter( + roleBinding *rbacv1.RoleBinding, + roleRef rbacv1.RoleRef, + subjects []rbacv1.Subject, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + roleBinding.Labels = objectMeta.Labels + roleBinding.Annotations = objectMeta.Annotations + roleBinding.RoleRef = roleRef + roleBinding.Subjects = subjects + return nil + } +} diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go new file mode 100644 index 0000000000..a487d29fb9 --- /dev/null +++ b/internal/mode/static/provisioner/store.go @@ -0,0 +1,422 @@ +package provisioner + +import ( + "reflect" + "strings" + "sync" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + 
rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +// NginxResources are all of the NGINX resources deployed in relation to a Gateway. +type NginxResources struct { + Gateway *graph.Gateway + Deployment metav1.ObjectMeta + Service metav1.ObjectMeta + ServiceAccount metav1.ObjectMeta + Role metav1.ObjectMeta + RoleBinding metav1.ObjectMeta + BootstrapConfigMap metav1.ObjectMeta + AgentConfigMap metav1.ObjectMeta + AgentTLSSecret metav1.ObjectMeta + PlusJWTSecret metav1.ObjectMeta + PlusClientSSLSecret metav1.ObjectMeta + PlusCASecret metav1.ObjectMeta + DockerSecrets []metav1.ObjectMeta +} + +// store stores the cluster state needed by the provisioner and allows to update it from the events. +type store struct { + // gateways is a map of all Gateway resources in the cluster. Used on startup to determine + // which nginx resources aren't tied to any Gateways and need to be cleaned up. + gateways map[types.NamespacedName]*gatewayv1.Gateway + // nginxResources is a map of Gateway NamespacedNames and their associated nginx resources. 
+ nginxResources map[types.NamespacedName]*NginxResources + + dockerSecretNames map[string]struct{} + agentTLSSecretName string + + // NGINX Plus secrets + jwtSecretName string + caSecretName string + clientSSLSecretName string + + lock sync.RWMutex +} + +func newStore( + dockerSecretNames []string, + agentTLSSecretName, + jwtSecretName, + caSecretName, + clientSSLSecretName string, +) *store { + dockerSecretNamesMap := make(map[string]struct{}) + for _, name := range dockerSecretNames { + dockerSecretNamesMap[name] = struct{}{} + } + + return &store{ + gateways: make(map[types.NamespacedName]*gatewayv1.Gateway), + nginxResources: make(map[types.NamespacedName]*NginxResources), + dockerSecretNames: dockerSecretNamesMap, + agentTLSSecretName: agentTLSSecretName, + jwtSecretName: jwtSecretName, + caSecretName: caSecretName, + clientSSLSecretName: clientSSLSecretName, + } +} + +func (s *store) updateGateway(obj *gatewayv1.Gateway) { + s.lock.Lock() + defer s.lock.Unlock() + + s.gateways[client.ObjectKeyFromObject(obj)] = obj +} + +func (s *store) deleteGateway(nsName types.NamespacedName) { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.gateways, nsName) +} + +func (s *store) getGateway(nsName types.NamespacedName) *gatewayv1.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.gateways[nsName] +} + +func (s *store) getGateways() map[types.NamespacedName]*gatewayv1.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.gateways +} + +// registerResourceInGatewayConfig adds or updates the provided resource in the tracking map. +// If the object being updated is the Gateway, check if anything that we care about changed. This ensures that +// we don't attempt to update nginx resources when the main event handler triggers this call with an unrelated event +// (like a Route update) that shouldn't result in nginx resource changes. 
+func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedName, object any) bool { + s.lock.Lock() + defer s.lock.Unlock() + + switch obj := object.(type) { + case *graph.Gateway: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Gateway: obj, + } + } else { + changed := gatewayChanged(cfg.Gateway, obj) + cfg.Gateway = obj + return changed + } + case *appsv1.Deployment: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Deployment: obj.ObjectMeta, + } + } else { + cfg.Deployment = obj.ObjectMeta + } + case *corev1.Service: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Service: obj.ObjectMeta, + } + } else { + cfg.Service = obj.ObjectMeta + } + case *corev1.ServiceAccount: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + ServiceAccount: obj.ObjectMeta, + } + } else { + cfg.ServiceAccount = obj.ObjectMeta + } + case *rbacv1.Role: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Role: obj.ObjectMeta, + } + } else { + cfg.Role = obj.ObjectMeta + } + case *rbacv1.RoleBinding: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + RoleBinding: obj.ObjectMeta, + } + } else { + cfg.RoleBinding = obj.ObjectMeta + } + case *corev1.ConfigMap: + s.registerConfigMapInGatewayConfig(obj, gatewayNSName) + case *corev1.Secret: + s.registerSecretInGatewayConfig(obj, gatewayNSName) + } + + return true +} + +func (s *store) registerConfigMapInGatewayConfig(obj *corev1.ConfigMap, gatewayNSName types.NamespacedName) { + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + 
BootstrapConfigMap: obj.ObjectMeta, + } + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + AgentConfigMap: obj.ObjectMeta, + } + } + } else { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + cfg.BootstrapConfigMap = obj.ObjectMeta + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + cfg.AgentConfigMap = obj.ObjectMeta + } + } +} + +//nolint:gocyclo // will refactor at some point +func (s *store) registerSecretInGatewayConfig(obj *corev1.Secret, gatewayNSName types.NamespacedName) { + hasSuffix := func(str, suffix string) bool { + return suffix != "" && strings.HasSuffix(str, suffix) + } + + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + switch { + case hasSuffix(obj.GetName(), s.agentTLSSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + AgentTLSSecret: obj.ObjectMeta, + } + case hasSuffix(obj.GetName(), s.jwtSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + PlusJWTSecret: obj.ObjectMeta, + } + case hasSuffix(obj.GetName(), s.caSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + PlusCASecret: obj.ObjectMeta, + } + case hasSuffix(obj.GetName(), s.clientSSLSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + PlusClientSSLSecret: obj.ObjectMeta, + } + } + + for secret := range s.dockerSecretNames { + if hasSuffix(obj.GetName(), secret) { + s.nginxResources[gatewayNSName] = &NginxResources{ + DockerSecrets: []metav1.ObjectMeta{obj.ObjectMeta}, + } + break + } + } + } else { + switch { + case hasSuffix(obj.GetName(), s.agentTLSSecretName): + cfg.AgentTLSSecret = obj.ObjectMeta + case hasSuffix(obj.GetName(), s.jwtSecretName): + cfg.PlusJWTSecret = obj.ObjectMeta + case hasSuffix(obj.GetName(), s.caSecretName): + cfg.PlusCASecret = obj.ObjectMeta + case hasSuffix(obj.GetName(), s.clientSSLSecretName): + cfg.PlusClientSSLSecret = obj.ObjectMeta + } + + for secret := 
range s.dockerSecretNames { + if hasSuffix(obj.GetName(), secret) { + if len(cfg.DockerSecrets) == 0 { + cfg.DockerSecrets = []metav1.ObjectMeta{obj.ObjectMeta} + } else { + cfg.DockerSecrets = append(cfg.DockerSecrets, obj.ObjectMeta) + } + } + } + } +} + +func gatewayChanged(original, updated *graph.Gateway) bool { + if original == nil { + return true + } + + if original.Valid != updated.Valid { + return true + } + + if !reflect.DeepEqual(original.Source, updated.Source) { + return true + } + + return !reflect.DeepEqual(original.EffectiveNginxProxy, updated.EffectiveNginxProxy) +} + +func (s *store) getNginxResourcesForGateway(nsName types.NamespacedName) *NginxResources { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.nginxResources[nsName] +} + +func (s *store) deleteResourcesForGateway(nsName types.NamespacedName) { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.nginxResources, nsName) +} + +//nolint:gocyclo // will refactor at some point +func (s *store) gatewayExistsForResource(object client.Object, nsName types.NamespacedName) *graph.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + for _, resources := range s.nginxResources { + switch object.(type) { + case *appsv1.Deployment: + if resourceMatches(resources.Deployment, nsName) { + return resources.Gateway + } + case *corev1.Service: + if resourceMatches(resources.Service, nsName) { + return resources.Gateway + } + case *corev1.ServiceAccount: + if resourceMatches(resources.ServiceAccount, nsName) { + return resources.Gateway + } + case *rbacv1.Role: + if resourceMatches(resources.Role, nsName) { + return resources.Gateway + } + case *rbacv1.RoleBinding: + if resourceMatches(resources.RoleBinding, nsName) { + return resources.Gateway + } + case *corev1.ConfigMap: + if resourceMatches(resources.BootstrapConfigMap, nsName) { + return resources.Gateway + } + if resourceMatches(resources.AgentConfigMap, nsName) { + return resources.Gateway + } + case *corev1.Secret: + if 
secretResourceMatches(resources, nsName) { + return resources.Gateway + } + } + } + + return nil +} + +func secretResourceMatches(resources *NginxResources, nsName types.NamespacedName) bool { + if resourceMatches(resources.AgentTLSSecret, nsName) { + return true + } + + for _, secret := range resources.DockerSecrets { + if resourceMatches(secret, nsName) { + return true + } + } + + if resourceMatches(resources.PlusJWTSecret, nsName) { + return true + } + + if resourceMatches(resources.PlusClientSSLSecret, nsName) { + return true + } + + return resourceMatches(resources.PlusCASecret, nsName) +} + +func resourceMatches(objMeta metav1.ObjectMeta, nsName types.NamespacedName) bool { + return objMeta.GetName() == nsName.Name && objMeta.GetNamespace() == nsName.Namespace +} + +func (s *store) getResourceVersionForObject(gatewayNSName types.NamespacedName, object client.Object) string { + s.lock.RLock() + defer s.lock.RUnlock() + + resources, exists := s.nginxResources[gatewayNSName] + if !exists { + return "" + } + + switch obj := object.(type) { + case *appsv1.Deployment: + if resources.Deployment.GetName() == obj.GetName() { + return resources.Deployment.GetResourceVersion() + } + case *corev1.Service: + if resources.Service.GetName() == obj.GetName() { + return resources.Service.GetResourceVersion() + } + case *corev1.ServiceAccount: + if resources.ServiceAccount.GetName() == obj.GetName() { + return resources.ServiceAccount.GetResourceVersion() + } + case *rbacv1.Role: + if resources.Role.GetName() == obj.GetName() { + return resources.Role.GetResourceVersion() + } + case *rbacv1.RoleBinding: + if resources.RoleBinding.GetName() == obj.GetName() { + return resources.RoleBinding.GetResourceVersion() + } + case *corev1.ConfigMap: + return getResourceVersionForConfigMap(resources, obj) + case *corev1.Secret: + return getResourceVersionForSecret(resources, obj) + } + + return "" +} + +func getResourceVersionForConfigMap(resources *NginxResources, configmap 
*corev1.ConfigMap) string { + if resources.BootstrapConfigMap.GetName() == configmap.GetName() { + return resources.BootstrapConfigMap.GetResourceVersion() + } + if resources.AgentConfigMap.GetName() == configmap.GetName() { + return resources.AgentConfigMap.GetResourceVersion() + } + + return "" +} + +func getResourceVersionForSecret(resources *NginxResources, secret *corev1.Secret) string { + if resources.AgentTLSSecret.GetName() == secret.GetName() { + return resources.AgentTLSSecret.GetResourceVersion() + } + for _, dockerSecret := range resources.DockerSecrets { + if dockerSecret.GetName() == secret.GetName() { + return dockerSecret.GetResourceVersion() + } + } + if resources.PlusJWTSecret.GetName() == secret.GetName() { + return resources.PlusJWTSecret.GetResourceVersion() + } + if resources.PlusClientSSLSecret.GetName() == secret.GetName() { + return resources.PlusClientSSLSecret.GetResourceVersion() + } + if resources.PlusCASecret.GetName() == secret.GetName() { + return resources.PlusCASecret.GetResourceVersion() + } + + return "" +} diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go new file mode 100644 index 0000000000..bc52728631 --- /dev/null +++ b/internal/mode/static/provisioner/store_test.go @@ -0,0 +1,838 @@ +package provisioner + +import ( + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +func TestNewStore(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore([]string{"docker-secret"}, "agent-tls-secret", "jwt-secret", "ca-secret", "client-ssl-secret") + + g.Expect(store).NotTo(BeNil()) + g.Expect(store.dockerSecretNames).To(HaveKey("docker-secret")) + g.Expect(store.agentTLSSecretName).To(Equal("agent-tls-secret")) + g.Expect(store.jwtSecretName).To(Equal("jwt-secret")) + g.Expect(store.caSecretName).To(Equal("ca-secret")) + g.Expect(store.clientSSLSecretName).To(Equal("client-ssl-secret")) +} + +func TestUpdateGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "", "") + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + Namespace: "default", + }, + } + nsName := client.ObjectKeyFromObject(gateway) + + store.updateGateway(gateway) + + g.Expect(store.gateways).To(HaveKey(nsName)) + g.Expect(store.getGateway(nsName)).To(Equal(gateway)) +} + +func TestDeleteGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + store.gateways[nsName] = &gatewayv1.Gateway{} + + store.deleteGateway(nsName) + + g.Expect(store.gateways).NotTo(HaveKey(nsName)) + g.Expect(store.getGateway(nsName)).To(BeNil()) +} + +func TestGetGateways(t *testing.T) { + t.Parallel() + g := 
NewWithT(t) + + store := newStore(nil, "", "", "", "") + gateway1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway-1", + Namespace: "default", + }, + } + gateway2 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway-2", + Namespace: "default", + }, + } + nsName1 := client.ObjectKeyFromObject(gateway1) + nsName2 := client.ObjectKeyFromObject(gateway2) + + store.updateGateway(gateway1) + store.updateGateway(gateway2) + + gateways := store.getGateways() + + g.Expect(gateways).To(HaveKey(nsName1)) + g.Expect(gateways).To(HaveKey(nsName2)) + g.Expect(gateways[nsName1]).To(Equal(gateway1)) + g.Expect(gateways[nsName2]).To(Equal(gateway2)) +} + +func TestRegisterResourceInGatewayConfig(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore([]string{"docker-secret"}, "agent-tls-secret", "jwt-secret", "ca-secret", "client-ssl-secret") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + + registerAndGetResources := func(obj interface{}) *NginxResources { + changed := store.registerResourceInGatewayConfig(nsName, obj) + g.Expect(changed).To(BeTrue(), fmt.Sprintf("failed: %T", obj)) + g.Expect(store.nginxResources).To(HaveKey(nsName), fmt.Sprintf("failed: %T", obj)) + + return store.getNginxResourcesForGateway(nsName) + } + + // Gateway, new config + gw := &graph.Gateway{} + resources := registerAndGetResources(gw) + g.Expect(resources.Gateway).To(Equal(gw)) + + // Gateway, updated config + gw = &graph.Gateway{ + Valid: true, + } + resources = registerAndGetResources(gw) + g.Expect(resources.Gateway).To(Equal(gw)) + + defaultMeta := metav1.ObjectMeta{ + Name: "test-resource", + Namespace: "default", + } + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Deployment + dep := &appsv1.Deployment{ObjectMeta: defaultMeta} + resources = registerAndGetResources(dep) + g.Expect(resources.Deployment).To(Equal(defaultMeta)) + + // Deployment again, 
already exists + resources = registerAndGetResources(dep) + g.Expect(resources.Deployment).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Service + svc := &corev1.Service{ObjectMeta: defaultMeta} + resources = registerAndGetResources(svc) + g.Expect(resources.Service).To(Equal(defaultMeta)) + + // Service again, already exists + resources = registerAndGetResources(svc) + g.Expect(resources.Service).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ServiceAccount + svcAcct := &corev1.ServiceAccount{ObjectMeta: defaultMeta} + resources = registerAndGetResources(svcAcct) + g.Expect(resources.ServiceAccount).To(Equal(defaultMeta)) + + // ServiceAccount again, already exists + resources = registerAndGetResources(svcAcct) + g.Expect(resources.ServiceAccount).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Role + role := &rbacv1.Role{ObjectMeta: defaultMeta} + resources = registerAndGetResources(role) + g.Expect(resources.Role).To(Equal(defaultMeta)) + + // Role again, already exists + resources = registerAndGetResources(role) + g.Expect(resources.Role).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // RoleBinding + roleBinding := &rbacv1.RoleBinding{ObjectMeta: defaultMeta} + resources = registerAndGetResources(roleBinding) + g.Expect(resources.RoleBinding).To(Equal(defaultMeta)) + + // RoleBinding again, already exists + resources = registerAndGetResources(roleBinding) + g.Expect(resources.RoleBinding).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ConfigMap + bootstrapCMMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, nginxIncludesConfigMapNameSuffix), + Namespace: defaultMeta.Namespace, + } + 
bootstrapCM := &corev1.ConfigMap{ObjectMeta: bootstrapCMMeta} + resources = registerAndGetResources(bootstrapCM) + g.Expect(resources.BootstrapConfigMap).To(Equal(bootstrapCMMeta)) + + // ConfigMap again, already exists + resources = registerAndGetResources(bootstrapCM) + g.Expect(resources.BootstrapConfigMap).To(Equal(bootstrapCMMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ConfigMap + agentCMMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, nginxAgentConfigMapNameSuffix), + Namespace: defaultMeta.Namespace, + } + agentCM := &corev1.ConfigMap{ObjectMeta: agentCMMeta} + resources = registerAndGetResources(agentCM) + g.Expect(resources.AgentConfigMap).To(Equal(agentCMMeta)) + + // ConfigMap again, already exists + resources = registerAndGetResources(agentCM) + g.Expect(resources.AgentConfigMap).To(Equal(agentCMMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + agentTLSSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.agentTLSSecretName), + Namespace: defaultMeta.Namespace, + } + agentTLSSecret := &corev1.Secret{ObjectMeta: agentTLSSecretMeta} + resources = registerAndGetResources(agentTLSSecret) + g.Expect(resources.AgentTLSSecret).To(Equal(agentTLSSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(agentTLSSecret) + g.Expect(resources.AgentTLSSecret).To(Equal(agentTLSSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + jwtSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.jwtSecretName), + Namespace: defaultMeta.Namespace, + } + jwtSecret := &corev1.Secret{ObjectMeta: jwtSecretMeta} + resources = registerAndGetResources(jwtSecret) + g.Expect(resources.PlusJWTSecret).To(Equal(jwtSecretMeta)) + + // Secret again, already exists 
+ resources = registerAndGetResources(jwtSecret) + g.Expect(resources.PlusJWTSecret).To(Equal(jwtSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + caSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.caSecretName), + Namespace: defaultMeta.Namespace, + } + caSecret := &corev1.Secret{ObjectMeta: caSecretMeta} + resources = registerAndGetResources(caSecret) + g.Expect(resources.PlusCASecret).To(Equal(caSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(caSecret) + g.Expect(resources.PlusCASecret).To(Equal(caSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + clientSSLSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.clientSSLSecretName), + Namespace: defaultMeta.Namespace, + } + clientSSLSecret := &corev1.Secret{ObjectMeta: clientSSLSecretMeta} + resources = registerAndGetResources(clientSSLSecret) + g.Expect(resources.PlusClientSSLSecret).To(Equal(clientSSLSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(clientSSLSecret) + g.Expect(resources.PlusClientSSLSecret).To(Equal(clientSSLSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Docker Secret + dockerSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, "docker-secret"), + Namespace: defaultMeta.Namespace, + } + dockerSecret := &corev1.Secret{ObjectMeta: dockerSecretMeta} + resources = registerAndGetResources(dockerSecret) + g.Expect(resources.DockerSecrets).To(ContainElements(dockerSecretMeta)) + + // Docker Secret again, already exists + resources = registerAndGetResources(dockerSecret) + g.Expect(resources.DockerSecrets).To(ContainElement(dockerSecretMeta)) +} + +func TestGatewayChanged(t *testing.T) { + t.Parallel() + + 
tests := []struct { + original *graph.Gateway + updated *graph.Gateway + name string + changed bool + }{ + { + name: "nil gateway", + original: nil, + changed: true, + }, + { + name: "valid field changes", + original: &graph.Gateway{Valid: true}, + updated: &graph.Gateway{Valid: false}, + changed: true, + }, + { + name: "source changes", + original: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + updated: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 81, + }, + }, + }, + }}, + changed: true, + }, + { + name: "effective nginx proxy config changes", + original: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](1), + }, + }, + }, + }, + updated: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](2), + }, + }, + }, + }, + changed: true, + }, + { + name: "no changes", + original: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + updated: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + g.Expect(gatewayChanged(test.original, test.updated)).To(Equal(test.changed)) + }) + } +} + +func TestDeleteResourcesForGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + 
store.nginxResources[nsName] = &NginxResources{} + + store.deleteResourcesForGateway(nsName) + + g.Expect(store.nginxResources).NotTo(HaveKey(nsName)) +} + +func TestGatewayExistsForResource(t *testing.T) { + t.Parallel() + + store := newStore(nil, "", "", "", "") + gateway := &graph.Gateway{} + store.nginxResources[types.NamespacedName{Name: "test-gateway", Namespace: "default"}] = &NginxResources{ + Gateway: gateway, + Deployment: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Service: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + ServiceAccount: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + }, + Role: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + RoleBinding: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, + BootstrapConfigMap: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + AgentConfigMap: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + AgentTLSSecret: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, + PlusJWTSecret: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + }, + PlusCASecret: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + }, + PlusClientSSLSecret: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + DockerSecrets: []metav1.ObjectMeta{ + { + Name: "test-docker-secret", + Namespace: "default", + }, + }, + } + + tests := []struct { + expected *graph.Gateway + object client.Object + name string + }{ + { + name: "Deployment exists", + object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Service exists", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + expected: 
gateway, + }, + { + name: "ServiceAccount exists", + object: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Role exists", + object: &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "RoleBinding exists", + object: &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Bootstrap ConfigMap exists", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Agent ConfigMap exists", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Agent TLS Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "JWT Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "CA Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Client SSL Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Docker Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-docker-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Resource does not exist", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"non-existent-service", + Namespace: "default", + }, + }, + expected: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + result := store.gatewayExistsForResource(test.object, client.ObjectKeyFromObject(test.object)) + g.Expect(result).To(Equal(test.expected)) + }) + } +} + +func TestGetResourceVersionForObject(t *testing.T) { + t.Parallel() + + store := newStore(nil, "", "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + store.nginxResources[nsName] = &NginxResources{ + Deployment: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + ResourceVersion: "1", + }, + Service: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + ResourceVersion: "2", + }, + ServiceAccount: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + ResourceVersion: "3", + }, + Role: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + ResourceVersion: "4", + }, + RoleBinding: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + ResourceVersion: "5", + }, + BootstrapConfigMap: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + ResourceVersion: "6", + }, + AgentConfigMap: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + ResourceVersion: "7", + }, + AgentTLSSecret: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + ResourceVersion: "8", + }, + PlusJWTSecret: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + ResourceVersion: "9", + }, + PlusCASecret: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + ResourceVersion: "10", + }, + PlusClientSSLSecret: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + ResourceVersion: "11", + }, + DockerSecrets: []metav1.ObjectMeta{ + { + Name: "test-docker-secret", + Namespace: "default", + ResourceVersion: 
"12", + }, + }, + } + + tests := []struct { + name string + object client.Object + expectedResult string + }{ + { + name: "Deployment resource version", + object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + }, + expectedResult: "1", + }, + { + name: "Service resource version", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + expectedResult: "2", + }, + { + name: "ServiceAccount resource version", + object: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + }, + }, + expectedResult: "3", + }, + { + name: "Role resource version", + object: &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + }, + expectedResult: "4", + }, + { + name: "RoleBinding resource version", + object: &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, + }, + expectedResult: "5", + }, + { + name: "Bootstrap ConfigMap resource version", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + }, + expectedResult: "6", + }, + { + name: "Agent ConfigMap resource version", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + }, + expectedResult: "7", + }, + { + name: "Agent TLS Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, + }, + expectedResult: "8", + }, + { + name: "JWT Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + }, + }, + expectedResult: "9", + }, + { + name: "CA Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-ca-secret", + Namespace: "default", + }, + }, + expectedResult: "10", + }, + { + name: "Client SSL Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + }, + expectedResult: "11", + }, + { + name: "Docker Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-docker-secret", + Namespace: "default", + }, + }, + expectedResult: "12", + }, + { + name: "Non-existent resource", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-service", + Namespace: "default", + }, + }, + expectedResult: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + result := store.getResourceVersionForObject(nsName, test.object) + g.Expect(result).To(Equal(test.expectedResult)) + }) + } +} diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go new file mode 100644 index 0000000000..87a667ef87 --- /dev/null +++ b/internal/mode/static/provisioner/templates.go @@ -0,0 +1,79 @@ +package provisioner + +import gotemplate "text/template" + +var ( + mainTemplate = gotemplate.Must(gotemplate.New("main").Parse(mainTemplateText)) + mgmtTemplate = gotemplate.Must(gotemplate.New("mgmt").Parse(mgmtTemplateText)) + agentTemplate = gotemplate.Must(gotemplate.New("agent").Parse(agentTemplateText)) +) + +const mainTemplateText = ` +error_log stderr {{ .ErrorLevel }};` + +const mgmtTemplateText = `mgmt { + {{- if .UsageEndpoint }} + usage_report endpoint={{ .UsageEndpoint }}; + {{- end }} + {{- if .SkipVerify }} + ssl_verify off; + {{- end }} + {{- if .UsageCASecret }} + ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; + {{- end }} + {{- if .UsageClientSSLSecret }} + ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; + ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; + {{- end }} + 
enforce_initial_report off; + deployment_context /etc/nginx/main-includes/deployment_ctx.json; +}` + +const agentTemplateText = `command: + server: + host: {{ .ServiceName }}.{{ .Namespace }}.svc + port: 443 + auth: + tokenpath: /var/run/secrets/ngf/serviceaccount/token + tls: + cert: /var/run/secrets/ngf/tls.crt + key: /var/run/secrets/ngf/tls.key + ca: /var/run/secrets/ngf/ca.crt + server_name: {{ .ServiceName }}.{{ .Namespace }}.svc +allowed_directories: +- /etc/nginx +- /usr/share/nginx +- /var/run/nginx +features: +- configuration +- certificates +{{- if .EnableMetrics }} +- metrics +{{- end }} +{{- if eq true .Plus }} +- api-action +{{- end }} +{{- if .LogLevel }} +log: + level: {{ .LogLevel }} +{{- end }} +{{- if .EnableMetrics }} +collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: {{ .MetricsPort }} +{{- end }}` diff --git a/internal/mode/static/state/change_processor.go b/internal/mode/static/state/change_processor.go index 3f5122df5a..1d136383b8 100644 --- a/internal/mode/static/state/change_processor.go +++ b/internal/mode/static/state/change_processor.go @@ -28,19 +28,6 @@ import ( //go:generate go tool counterfeiter -generate -// ChangeType is the type of change that occurred based on a k8s object event. -type ChangeType int - -const ( - // NoChange means that nothing changed. - NoChange ChangeType = iota - // EndpointsOnlyChange means that only the endpoints changed. - // If using NGINX Plus, this update can be done using the API without a reload. - EndpointsOnlyChange - // ClusterStateChange means that something other than endpoints changed. This requires an NGINX reload. - ClusterStateChange -) - //counterfeiter:generate . 
ChangeProcessor // ChangeProcessor processes the changes to resources and produces a graph-like representation @@ -55,8 +42,8 @@ type ChangeProcessor interface { // this ChangeProcessor was created for. CaptureDeleteChange(resourceType ngftypes.ObjectType, nsname types.NamespacedName) // Process produces a graph-like representation of GatewayAPI resources. - // If no changes were captured, the changed return argument will be NoChange and graph will be empty. - Process() (changeType ChangeType, graphCfg *graph.Graph) + // If no changes were captured, the graph will be empty. + Process() (graphCfg *graph.Graph) // GetLatestGraph returns the latest Graph. GetLatestGraph() *graph.Graph } @@ -69,8 +56,6 @@ type ChangeProcessorConfig struct { EventRecorder record.EventRecorder // MustExtractGVK is a function that extracts schema.GroupVersionKind from a client.Object. MustExtractGVK kinds.MustExtractGVK - // ProtectedPorts are the ports that may not be configured by a listener with a descriptive name of the ports. - ProtectedPorts graph.ProtectedPorts // PlusSecrets is a list of secret files used for NGINX Plus reporting (JWT, client SSL, CA). PlusSecrets map[types.NamespacedName][]graph.PlusSecretFile // Logger is the logger for this Change Processor. @@ -90,7 +75,7 @@ type ChangeProcessorImpl struct { // updater acts upon the cluster state. updater Updater // getAndResetClusterStateChanged tells if and how the cluster state has changed. 
- getAndResetClusterStateChanged func() ChangeType + getAndResetClusterStateChanged func() bool cfg ChangeProcessorConfig lock sync.Mutex @@ -109,7 +94,7 @@ func NewChangeProcessorImpl(cfg ChangeProcessorConfig) *ChangeProcessorImpl { CRDMetadata: make(map[types.NamespacedName]*metav1.PartialObjectMetadata), BackendTLSPolicies: make(map[types.NamespacedName]*v1alpha3.BackendTLSPolicy), ConfigMaps: make(map[types.NamespacedName]*apiv1.ConfigMap), - NginxProxies: make(map[types.NamespacedName]*ngfAPIv1alpha1.NginxProxy), + NginxProxies: make(map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy), GRPCRoutes: make(map[types.NamespacedName]*v1.GRPCRoute), TLSRoutes: make(map[types.NamespacedName]*v1alpha2.TLSRoute), NGFPolicies: make(map[graph.PolicyKey]policies.Policy), @@ -203,7 +188,7 @@ func NewChangeProcessorImpl(cfg ChangeProcessorConfig) *ChangeProcessorImpl { predicate: annotationChangedPredicate{annotation: gatewayclass.BundleVersionAnnotation}, }, { - gvk: cfg.MustExtractGVK(&ngfAPIv1alpha1.NginxProxy{}), + gvk: cfg.MustExtractGVK(&ngfAPIv1alpha2.NginxProxy{}), store: newObjectStoreMapAdapter(clusterStore.NginxProxies), predicate: funcPredicate{stateChanged: isReferenced}, }, @@ -270,13 +255,12 @@ func (c *ChangeProcessorImpl) CaptureDeleteChange(resourceType ngftypes.ObjectTy c.updater.Delete(resourceType, nsname) } -func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { +func (c *ChangeProcessorImpl) Process() *graph.Graph { c.lock.Lock() defer c.lock.Unlock() - changeType := c.getAndResetClusterStateChanged() - if changeType == NoChange { - return NoChange, nil + if !c.getAndResetClusterStateChanged() { + return nil } c.latestGraph = graph.BuildGraph( @@ -285,10 +269,9 @@ func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { c.cfg.GatewayClassName, c.cfg.PlusSecrets, c.cfg.Validators, - c.cfg.ProtectedPorts, ) - return changeType, c.latestGraph + return c.latestGraph } func (c *ChangeProcessorImpl) GetLatestGraph() *graph.Graph { 
diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index d71ebd5212..4797710fa3 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -58,18 +58,14 @@ func createHTTPRoute( CommonRouteSpec: v1.CommonRouteSpec{ ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpListenerName)), }, { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpsListenerName)), }, }, }, @@ -109,18 +105,14 @@ func createGRPCRoute( CommonRouteSpec: v1.CommonRouteSpec{ ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpListenerName)), }, { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpsListenerName)), }, }, }, @@ -156,11 +148,9 @@ func createTLSRoute(name, gateway, hostname string, backendRefs ...v1.BackendRef CommonRouteSpec: v1.CommonRouteSpec{ 
ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(tlsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(tlsListenerName)), }, }, }, @@ -178,7 +168,7 @@ func createTLSRoute(name, gateway, hostname string, backendRefs ...v1.BackendRef func createHTTPListener() v1.Listener { return v1.Listener{ - Name: httpListenerName, + Name: v1.SectionName(httpListenerName), Hostname: nil, Port: 80, Protocol: v1.HTTPProtocolType, @@ -410,6 +400,26 @@ var _ = Describe("ChangeProcessor", func() { processor state.ChangeProcessor ) + testUpsertTriggersChange := func(obj client.Object) { + processor.CaptureUpsertChange(obj) + Expect(processor.Process()).ToNot(BeNil()) + } + + testUpsertDoesNotTriggerChange := func(obj client.Object) { + processor.CaptureUpsertChange(obj) + Expect(processor.Process()).To(BeNil()) + } + + testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName) { + processor.CaptureDeleteChange(obj, nsname) + Expect(processor.Process()).ToNot(BeNil()) + } + + testDeleteDoesNotTriggerChange := func(obj client.Object, nsname types.NamespacedName) { + processor.CaptureDeleteChange(obj, nsname) + Expect(processor.Process()).To(BeNil()) + } + BeforeEach(OncePerOrdered, func() { processor = state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: controllerName, @@ -428,22 +438,21 @@ var _ = Describe("ChangeProcessor", func() { hr1, hr1Updated, hr2 *v1.HTTPRoute gr1, gr1Updated, gr2 *v1.GRPCRoute tr1, tr1Updated, tr2 *v1alpha2.TLSRoute - gw1, gw1Updated, gw2 *v1.Gateway + gw1, gw1Updated, gw2, gw2Updated *v1.Gateway secretRefGrant, hrServiceRefGrant *v1beta1.ReferenceGrant grServiceRefGrant, trServiceRefGrant *v1beta1.ReferenceGrant - expGraph *graph.Graph + expGraph, expGraph2 *graph.Graph 
expRouteHR1, expRouteHR2 *graph.L7Route expRouteGR1, expRouteGR2 *graph.L7Route expRouteTR1, expRouteTR2 *graph.L4Route gatewayAPICRD, gatewayAPICRDUpdated *metav1.PartialObjectMetadata - httpRouteKey1, httpRouteKey2, grpcRouteKey1, grpcRouteKey2 graph.RouteKey + httpRouteKey1, httpRouteKey2, grpcRouteKey1, grpcRouteKey2 graph.RouteKey // gitleaks:allow not a secret trKey1, trKey2 graph.L4RouteKey refSvc, refGRPCSvc, refTLSSvc types.NamespacedName ) processAndValidateGraph := func(expGraph *graph.Graph) { - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graphCfg := processor.Process() Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) } @@ -482,14 +491,16 @@ var _ = Describe("ChangeProcessor", func() { httpRouteKey1 = graph.CreateRouteKey(hr1) hr1Updated = hr1.DeepCopy() hr1Updated.Generation++ - hr2 = createHTTPRoute("hr-2", "gateway-2", "bar.example.com") + + hr2 = createHTTPRoute("hr-2", "gateway-2", "bar.example.com", crossNsHTTPBackendRef) httpRouteKey2 = graph.CreateRouteKey(hr2) gr1 = createGRPCRoute("gr-1", "gateway-1", "foo.example.com", grpcBackendRef) grpcRouteKey1 = graph.CreateRouteKey(gr1) gr1Updated = gr1.DeepCopy() gr1Updated.Generation++ - gr2 = createGRPCRoute("gr-2", "gateway-2", "bar.example.com") + + gr2 = createGRPCRoute("gr-2", "gateway-2", "bar.example.com", grpcBackendRef) grpcRouteKey2 = graph.CreateRouteKey(gr2) tlsBackendRef := createTLSBackendRef(refTLSSvc.Name, refTLSSvc.Namespace) @@ -497,6 +508,7 @@ var _ = Describe("ChangeProcessor", func() { trKey1 = graph.CreateRouteKeyL4(tr1) tr1Updated = tr1.DeepCopy() tr1Updated.Generation++ + tr2 = createTLSRoute("tr-2", "gateway-2", "bar.tls.com", tlsBackendRef) trKey2 = graph.CreateRouteKeyL4(tr2) @@ -645,6 +657,9 @@ var _ = Describe("ChangeProcessor", func() { createTLSListener(tlsListenerName), ) + gw2Updated = gw2.DeepCopy() + gw2Updated.Generation++ + 
gatewayAPICRD = &metav1.PartialObjectMetadata{ TypeMeta: metav1.TypeMeta{ Kind: "CustomResourceDefinition", @@ -668,20 +683,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: hr1.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, Idx: 1, SectionName: hr1.Spec.ParentRefs[1].SectionName, }, @@ -692,8 +721,9 @@ var _ = Describe("ChangeProcessor", func() { { BackendRefs: []graph.BackendRef{ { - SvcNsName: refSvc, - Weight: 1, + SvcNsName: refSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, ValidMatches: true, @@ -721,20 +751,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + 
client.ObjectKeyFromObject(gw2), + httpListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: hr2.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpsListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, Idx: 1, SectionName: hr2.Spec.ParentRefs[1].SectionName, }, @@ -743,18 +787,30 @@ var _ = Describe("ChangeProcessor", func() { Hostnames: hr2.Spec.Hostnames, Rules: []graph.RouteRule{ { + BackendRefs: []graph.BackendRef{ + { + SvcNsName: refSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + }, + }, ValidMatches: true, Filters: graph.RouteRuleFilters{ Valid: true, Filters: []graph.Filter{}, }, Matches: hr2.Spec.Rules[0].Matches, - RouteBackendRefs: []graph.RouteBackendRef{}, + RouteBackendRefs: createRouteBackendRefs(hr2.Spec.Rules[0].BackendRefs), }, }, }, Valid: true, Attachable: true, + Conditions: []conditions.Condition{ + staticConds.NewRouteBackendRefRefBackendNotFound( + "spec.rules[0].backendRefs[0].name: Not found: \"service\"", + ), + }, } expRouteGR1 = &graph.L7Route{ @@ -763,20 +819,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: 
map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: gr1.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, Idx: 1, SectionName: gr1.Spec.ParentRefs[1].SectionName, }, @@ -787,8 +857,9 @@ var _ = Describe("ChangeProcessor", func() { { BackendRefs: []graph.BackendRef{ { - SvcNsName: refGRPCSvc, - Weight: 1, + SvcNsName: refGRPCSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, ValidMatches: true, @@ -816,20 +887,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: gr2.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ 
- AcceptedHostnames: map[string][]string{httpsListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpsListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, Idx: 1, SectionName: gr2.Spec.ParentRefs[1].SectionName, }, @@ -838,18 +923,30 @@ var _ = Describe("ChangeProcessor", func() { Hostnames: gr2.Spec.Hostnames, Rules: []graph.RouteRule{ { + BackendRefs: []graph.BackendRef{ + { + SvcNsName: refGRPCSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + }, + }, ValidMatches: true, Filters: graph.RouteRuleFilters{ Valid: true, Filters: []graph.Filter{}, }, Matches: graph.ConvertGRPCMatches(gr2.Spec.Rules[0].Matches), - RouteBackendRefs: []graph.RouteBackendRef{}, + RouteBackendRefs: createGRPCRouteBackendRefs(gr2.Spec.Rules[0].BackendRefs), }, }, }, Valid: true, Attachable: true, + Conditions: []conditions.Condition{ + staticConds.NewRouteBackendRefRefBackendNotFound( + "spec.rules[0].backendRefs[0].name: Not found: \"grpc-service\"", + ), + }, } expRouteTR1 = &graph.L4Route{ @@ -857,18 +954,26 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{tlsListenerName: {"foo.tls.com"}}, - Attached: true, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + tlsListenerName, + ): {"foo.tls.com"}, + }, + Attached: true, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: tr1.Spec.ParentRefs[0].SectionName, }, }, Spec: 
graph.L4RouteSpec{ Hostnames: tr1.Spec.Hostnames, BackendRef: graph.BackendRef{ - SvcNsName: refTLSSvc, - Valid: false, + SvcNsName: refTLSSvc, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Valid: true, @@ -885,18 +990,26 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{tlsListenerName: {"bar.tls.com"}}, - Attached: true, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + tlsListenerName, + ): {"bar.tls.com"}, + }, + Attached: true, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: tr2.Spec.ParentRefs[0].SectionName, }, }, Spec: graph.L4RouteSpec{ Hostnames: tr2.Spec.Hostnames, BackendRef: graph.BackendRef{ - SvcNsName: refTLSSvc, - Valid: false, + SvcNsName: refTLSSvc, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Valid: true, @@ -915,63 +1028,206 @@ var _ = Describe("ChangeProcessor", func() { Source: gc, Valid: true, }, - Gateway: &graph.Gateway{ - Source: gw1, - Listeners: []*graph.Listener{ - { - Name: httpListenerName, - Source: gw1.Spec.Listeners[0], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-1"}: { + Source: gw1, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + 
Source: gw1.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, - }, - { - Name: httpsListenerName, - Source: gw1.Spec.Listeners[1], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, - }, - { - Name: tlsListenerName, - Source: gw1.Spec.Listeners[2], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + { + Name: tlsListenerName, + GatewayName: 
client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-test-class", + }, }, - Valid: true, }, - IgnoredGateways: map[types.NamespacedName]*v1.Gateway{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, ReferencedSecrets: map[types.NamespacedName]*graph.Secret{}, ReferencedServices: map[types.NamespacedName]*graph.ReferencedService{ - refSvc: {}, - refTLSSvc: {}, - refGRPCSvc: {}, + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + }, + } + + expGraph2 = &graph.Graph{ + GatewayClass: &graph.GatewayClass{ + Source: gc, + Valid: true, + }, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-1"}: { + Source: gw1, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: 
helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-test-class", + }, + }, + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + 
ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + }, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + ReferencedSecrets: map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + client.ObjectKeyFromObject(diffNsTLSSecret): { + Source: diffNsTLSSecret, + CertBundle: diffNsTLSCert, + }, + }, + ReferencedServices: map[types.NamespacedName]*graph.ReferencedService{ + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, }, } }) When("no upsert has occurred", func() { It("returns nil graph", func() { - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(processor.GetLatestGraph()).To(BeNil()) }) @@ -1010,8 +1266,7 @@ var _ = 
Describe("ChangeProcessor", func() { It("returns nil graph", func() { processor.CaptureUpsertChange(diffNsTLSSecret) - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(&graph.Graph{}, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1022,9 +1277,10 @@ var _ = Describe("ChangeProcessor", func() { expGraph.GatewayClass = nil - expGraph.Gateway.Conditions = staticConds.NewGatewayInvalid("GatewayClass doesn't exist") - expGraph.Gateway.Valid = false - expGraph.Gateway.Listeners = nil + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + gw.Conditions = staticConds.NewGatewayInvalid("GatewayClass doesn't exist") + gw.Valid = false + gw.Listeners = nil // no ref grant exists yet for the routes expGraph.Routes[httpRouteKey1].Conditions = []conditions.Condition{ @@ -1051,23 +1307,23 @@ var _ = Describe("ChangeProcessor", func() { // gateway class does not exist so routes cannot attach expGraph.Routes[httpRouteKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[httpRouteKey1].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[grpcRouteKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[grpcRouteKey1].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: 
map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.L4Routes[trKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.ReferencedSecrets = nil @@ -1088,7 +1344,8 @@ var _ = Describe("ChangeProcessor", func() { // No ref grant exists yet for gw1 // so the listener is not valid, but still attachable - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Valid = false listener443.ResolvedSecret = nil listener443.Conditions = staticConds.NewListenerRefNotPermitted( @@ -1097,7 +1354,10 @@ var _ = Describe("ChangeProcessor", func() { expAttachment80 := &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - httpListenerName: {"foo.example.com"}, + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -1105,13 +1365,16 @@ var _ = Describe("ChangeProcessor", func() { expAttachment443 := &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - httpsListenerName: {"foo.example.com"}, + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, }, Attached: true, ListenerPort: 443, } - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[httpRouteKey1].ParentRefs[0].Attachment = expAttachment80 listener443.Routes[httpRouteKey1].ParentRefs[1].Attachment = expAttachment443 
listener80.Routes[grpcRouteKey1].ParentRefs[0].Attachment = expAttachment80 @@ -1119,22 +1382,22 @@ var _ = Describe("ChangeProcessor", func() { // no ref grant exists yet for hr1 expGraph.Routes[httpRouteKey1].Conditions = []conditions.Condition{ - staticConds.NewRouteInvalidListener(), staticConds.NewRouteBackendRefRefNotPermitted( "spec.rules[0].backendRefs[0].namespace: Forbidden: Backend ref to Service " + "service-ns/service not permitted by any ReferenceGrant", ), + staticConds.NewRouteInvalidListener(), } expGraph.Routes[httpRouteKey1].ParentRefs[0].Attachment = expAttachment80 expGraph.Routes[httpRouteKey1].ParentRefs[1].Attachment = expAttachment443 // no ref grant exists yet for gr1 expGraph.Routes[grpcRouteKey1].Conditions = []conditions.Condition{ - staticConds.NewRouteInvalidListener(), staticConds.NewRouteBackendRefRefNotPermitted( "spec.rules[0].backendRefs[0].namespace: Forbidden: Backend ref to Service " + "grpc-service-ns/grpc-service not permitted by any ReferenceGrant", ), + staticConds.NewRouteInvalidListener(), } expGraph.Routes[grpcRouteKey1].ParentRefs[0].Attachment = expAttachment80 expGraph.Routes[grpcRouteKey1].ParentRefs[1].Attachment = expAttachment443 @@ -1316,8 +1579,7 @@ var _ = Describe("ChangeProcessor", func() { gatewayclass.SupportedVersion, ) - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1339,10 +1601,11 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(hr1Updated) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Routes[httpRouteKey1].Source.SetGeneration(hr1Updated.Generation) - listener80 := 
getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[httpRouteKey1].Source.SetGeneration(hr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ Source: diffNsTLSSecret, @@ -1357,10 +1620,11 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gr1Updated) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Routes[grpcRouteKey1].Source.SetGeneration(gr1Updated.Generation) - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[grpcRouteKey1].Source.SetGeneration(gr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ Source: diffNsTLSSecret, @@ -1374,7 +1638,8 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(tr1Updated) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + tlsListener := getListenerByName(gw, tlsListenerName) tlsListener.L4Routes[trKey1].Source.SetGeneration(tr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ @@ -1389,7 +1654,8 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gw1Updated) - expGraph.Gateway.Source.Generation = gw1Updated.Generation + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + gw.Source.Generation = gw1Updated.Generation expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = 
&graph.Secret{ Source: diffNsTLSSecret, CertBundle: diffNsTLSCert, @@ -1430,8 +1696,7 @@ var _ = Describe("ChangeProcessor", func() { CertBundle: diffNsTLSCert, } - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1445,125 +1710,124 @@ var _ = Describe("ChangeProcessor", func() { CertBundle: diffNsTLSCert, } - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) }) When("the second Gateway is upserted", func() { - It("returns populated graph using first gateway", func() { + It("returns populated graph with second gateway", func() { processor.CaptureUpsertChange(gw2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, - } - - processAndValidateGraph(expGraph) + processAndValidateGraph(expGraph2) }) }) When("the second HTTPRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(hr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, + expGraph2.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ + Source: diffNsTLSSecret, + CertBundle: diffNsTLSCert, } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := 
expGraph2.Gateways[gw2NSName] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the second GRPCRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := expGraph2.Gateways[gw2NSName] - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.Routes[grpcRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: 
staticConds.NewRouteNotAcceptedGatewayIgnored(), + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, + grpcRouteKey2: expRouteGR2, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refGRPCSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the second TLSRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(tr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := expGraph2.Gateways[gw2NSName] + + listener80 := 
getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.Routes[grpcRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, } - expGraph.L4Routes[trKey2] = expRouteTR2 - expGraph.L4Routes[trKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, + grpcRouteKey2: expRouteGR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey1: expRouteTR1, + trKey2: expRouteTR2, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refGRPCSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refTLSSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the first Gateway is deleted", func() { @@ -1573,50 
+1837,112 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "gateway-1"}, ) - // gateway 2 takes over; - // route 1 has been replaced by route 2 - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] - - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) - - listener80.Routes[httpRouteKey2] = expRouteHR2 - listener443.Routes[httpRouteKey2] = expRouteHR2 - listener80.Routes[grpcRouteKey2] = expRouteGR2 - listener443.Routes[grpcRouteKey2] = expRouteGR2 - tlsListener.L4Routes[trKey2] = expRouteTR2 - - delete(expGraph.Routes, httpRouteKey1) - delete(expGraph.Routes, grpcRouteKey1) - delete(expGraph.L4Routes, trKey1) - - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.L4Routes[trKey2] = expRouteTR2 - - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, - } - - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + // gateway 2 only remains; + expGraph2.Gateways = 
map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - processAndValidateGraph(expGraph) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + listener443 := getListenerByName(gw, 
httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + + processAndValidateGraph(expGraph2) }) }) When("the second HTTPRoute is deleted", func() { @@ -1626,49 +1952,103 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "hr-2"}, ) - // gateway 2 still in charge; - // no HTTP routes remain - // GRPCRoute 2 still exists - // TLSRoute 2 still exists - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: 
{ + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] - listener80.Routes[grpcRouteKey2] = expRouteGR2 - listener443.Routes[grpcRouteKey2] = expRouteGR2 - 
tlsListener.L4Routes[trKey2] = expRouteTR2 + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: expRouteGR2, + } - delete(expGraph.Routes, httpRouteKey1) - delete(expGraph.Routes, grpcRouteKey1) - expGraph.Routes[grpcRouteKey2] = expRouteGR2 + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: expRouteGR2, + } - delete(expGraph.L4Routes, trKey1) - expGraph.L4Routes[trKey2] = expRouteTR2 + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: expRouteGR2, } - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: 
sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the second GRPCRoute is deleted", func() { @@ -1678,42 +2058,92 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "gr-2"}, ) - // gateway 2 still in charge; - // no routes remain - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: 
gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] - tlsListener.L4Routes[trKey2] = expRouteTR2 - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{} - delete(expGraph.L4Routes, trKey1) - expGraph.L4Routes[trKey2] = expRouteTR2 + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{} - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, } - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} - processAndValidateGraph(expGraph) + 
expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the second TLSRoute is deleted", func() { @@ -1723,38 +2153,81 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "tr-2"}, ) - // gateway 2 still in charge; - // no HTTP or TLS routes remain - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + 
{Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{} - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, - } + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - 
expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - expGraph.ReferencedServices = nil + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices = nil + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the GatewayClass is deleted", func() { @@ -1764,20 +2237,40 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Name: gcName}, ) - expGraph.GatewayClass = nil - expGraph.Gateway = &graph.Gateway{ - Source: gw2, - Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + expGraph2.GatewayClass = nil + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway-2", + Generation: 1, + }, + Spec: v1.GatewaySpec{ + GatewayClassName: "test-class", + Listeners: []v1.Listener{ + createHTTPListener(), + createHTTPSListener(httpsListenerName, sameNsTLSSecret), + createTLSListener(tlsListenerName), + }, + }, + }, + Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, } - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - expGraph.ReferencedSecrets = nil + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} + expGraph2.ReferencedSecrets = nil expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - expGraph.ReferencedServices = nil + 
expGraph2.ReferencedServices = nil - processAndValidateGraph(expGraph) + processAndValidateGraph(expGraph2) }) }) When("the second Gateway is deleted", func() { @@ -1932,58 +2425,43 @@ var _ = Describe("ChangeProcessor", func() { gw = createGateway("gw", createHTTPListener()) processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + gr := processor.Process() + Expect(gr).ToNot(BeNil()) }) - testProcessChangedVal := func(expChanged state.ChangeType) { - changed, _ := processor.Process() - Expect(changed).To(Equal(expChanged)) - } - - testUpsertTriggersChange := func(obj client.Object, expChanged state.ChangeType) { - processor.CaptureUpsertChange(obj) - testProcessChangedVal(expChanged) - } - - testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName, expChanged state.ChangeType) { - processor.CaptureDeleteChange(obj, nsname) - testProcessChangedVal(expChanged) - } - When("hr1 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1, state.ClusterStateChange) + testUpsertTriggersChange(hr1) }) }) When("a hr1 service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, state.ClusterStateChange) + testUpsertTriggersChange(hr1svc) }) }) When("a backendTLSPolicy is added for referenced service", func() { It("should trigger a change", func() { - testUpsertTriggersChange(btls, state.ClusterStateChange) + testUpsertTriggersChange(btls) }) }) When("an hr1 endpoint slice is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice1, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice1) }) }) When("an hr1 service is updated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, state.ClusterStateChange) + testUpsertTriggersChange(hr1svc) }) }) When("another hr1 endpoint slice is added", func() { It("should 
trigger a change", func() { - testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice2) }) }) When("an endpoint slice with a missing svc name label is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(missingSvcNameSlice, state.NoChange) + testUpsertDoesNotTriggerChange(missingSvcNameSlice) }) }) When("an hr1 endpoint slice is deleted", func() { @@ -1991,7 +2469,6 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice1, types.NamespacedName{Namespace: hr1slice1.Namespace, Name: hr1slice1.Name}, - state.EndpointsOnlyChange, ) }) }) @@ -2000,13 +2477,12 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - state.EndpointsOnlyChange, ) }) }) When("the second hr1 endpoint slice is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice2) }) }) When("hr1 is deleted", func() { @@ -2014,41 +2490,38 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1, types.NamespacedName{Namespace: hr1.Namespace, Name: hr1.Name}, - state.ClusterStateChange, ) }) }) When("hr1 service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( hr1svc, types.NamespacedName{Namespace: hr1svc.Namespace, Name: hr1svc.Name}, - state.NoChange, ) }) }) When("the second hr1 endpoint slice is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - state.NoChange, ) }) }) When("hr2 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr2, state.ClusterStateChange) + testUpsertTriggersChange(hr2) }) }) 
When("a hr3, that shares a backend service with hr2, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr3, state.ClusterStateChange) + testUpsertTriggersChange(hr3) }) }) When("sharedSvc, a service referenced by both hr2 and hr3, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) + testUpsertTriggersChange(sharedSvc) }) }) When("hr2 is deleted", func() { @@ -2056,7 +2529,6 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr2, types.NamespacedName{Namespace: hr2.Namespace, Name: hr2.Name}, - state.ClusterStateChange, ) }) }) @@ -2065,13 +2537,12 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - state.ClusterStateChange, ) }) }) When("sharedSvc is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) + testUpsertTriggersChange(sharedSvc) }) }) When("hr3 is deleted", func() { @@ -2079,62 +2550,59 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr3, types.NamespacedName{Namespace: hr3.Namespace, Name: hr3.Name}, - state.ClusterStateChange, ) }) }) When("sharedSvc is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - state.NoChange, ) }) }) When("a service that is not referenced by any route is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(notRefSvc, state.NoChange) + testUpsertDoesNotTriggerChange(notRefSvc) }) }) When("a route with an invalid backend ref type is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrInvalidBackendRef, state.ClusterStateChange) + 
testUpsertTriggersChange(hrInvalidBackendRef) }) }) When("a service with a namespace name that matches invalid backend ref is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(invalidSvc, state.NoChange) + testUpsertDoesNotTriggerChange(invalidSvc) }) }) When("an endpoint slice that is not owned by a referenced service is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(noRefSlice, state.NoChange) + testUpsertDoesNotTriggerChange(noRefSlice) }) }) When("an endpoint slice that is not owned by a referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( noRefSlice, types.NamespacedName{Namespace: noRefSlice.Namespace, Name: noRefSlice.Name}, - state.NoChange, ) }) }) Context("processing a route with multiple rules and three unique backend services", func() { When("route is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrMultipleRules, state.ClusterStateChange) + testUpsertTriggersChange(hrMultipleRules) }) }) When("first referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc1) }) }) When("second referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc2, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc2) }) }) When("first referenced service is deleted", func() { @@ -2142,23 +2610,22 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - state.ClusterStateChange, ) }) }) When("first referenced service is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc1) }) }) When("third referenced 
service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc3) }) }) When("third referenced service is updated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc3) }) }) When("route is deleted", func() { @@ -2169,34 +2636,30 @@ var _ = Describe("ChangeProcessor", func() { Namespace: hrMultipleRules.Namespace, Name: hrMultipleRules.Name, }, - state.ClusterStateChange, ) }) }) When("first referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - state.NoChange, ) }) }) When("second referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc2, types.NamespacedName{Namespace: bazSvc2.Namespace, Name: bazSvc2.Name}, - state.NoChange, ) }) }) When("final referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc3, types.NamespacedName{Namespace: bazSvc3.Namespace, Name: bazSvc3.Name}, - state.NoChange, ) }) }) @@ -2269,44 +2732,31 @@ var _ = Describe("ChangeProcessor", func() { When("a namespace is created that is not linked to a listener", func() { It("does not trigger an update", func() { - processor.CaptureUpsertChange(nsNoLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testUpsertDoesNotTriggerChange(nsNoLabels) }) }) When("a namespace is created that is linked to a listener", func() { It("triggers an update", func() { - processor.CaptureUpsertChange(ns) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(ns) }) }) 
When("a namespace is deleted that is not linked to a listener", func() { It("does not trigger an update", func() { - processor.CaptureDeleteChange(nsNoLabels, types.NamespacedName{Name: "no-labels"}) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testDeleteDoesNotTriggerChange(nsNoLabels, types.NamespacedName{Name: "no-labels"}) }) }) When("a namespace is deleted that is linked to a listener", func() { It("triggers an update", func() { - processor.CaptureDeleteChange(ns, types.NamespacedName{Name: "ns"}) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testDeleteTriggersChange(ns, types.NamespacedName{Name: "ns"}) }) }) When("a namespace that is not linked to a listener has its labels changed to match a listener", func() { It("triggers an update", func() { - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) - + testUpsertDoesNotTriggerChange(nsDifferentLabels) nsDifferentLabels.Labels = map[string]string{ "app": "allowed", } - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }) When( @@ -2316,9 +2766,7 @@ var _ = Describe("ChangeProcessor", func() { nsDifferentLabels.Labels = map[string]string{ "oranges": "bananas", } - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }, ) @@ -2329,9 +2777,7 @@ var _ = Describe("ChangeProcessor", func() { "oranges": "bananas", } gwChangedLabel.Generation++ - processor.CaptureUpsertChange(gwChangedLabel) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(gwChangedLabel) // After changing the gateway's labels and generation, the processor 
should be marked to update // the nginx configuration and build a new graph. When processor.Process() gets called, @@ -2340,84 +2786,157 @@ var _ = Describe("ChangeProcessor", func() { // the new labels on the gateway, it would not trigger a change as the namespace would no longer // be in the updated referencedNamespaces and the labels no longer match the new labels on the // gateway. - processor.CaptureUpsertChange(ns) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.NoChange)) - - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertDoesNotTriggerChange(ns) + testUpsertTriggersChange(nsDifferentLabels) }) }) When("a namespace that is not linked to a listener has its labels removed", func() { It("does not trigger an update", func() { ns.Labels = nil - processor.CaptureUpsertChange(ns) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testUpsertDoesNotTriggerChange(ns) }) }) When("a namespace that is linked to a listener has its labels removed", func() { It("triggers an update when labels are removed", func() { nsDifferentLabels.Labels = nil - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }) }) Describe("NginxProxy resource changes", Ordered, func() { - paramGC := gc.DeepCopy() - paramGC.Spec.ParametersRef = &v1beta1.ParametersReference{ - Group: ngfAPIv1alpha1.GroupName, - Kind: kinds.NginxProxy, - Name: "np", - } + Context("referenced by a GatewayClass", func() { + paramGC := gc.DeepCopy() + paramGC.Spec.ParametersRef = &v1beta1.ParametersReference{ + Group: ngfAPIv1alpha1.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + Namespace: helpers.GetPointer[v1.Namespace]("test"), + } - np := &ngfAPIv1alpha1.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np", - }, - } + np 
:= &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np", + Namespace: "test", + }, + } - npUpdated := &ngfAPIv1alpha1.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np", - }, - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-svc:123", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + npUpdated := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np", + Namespace: "test", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-svc:123"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, }, }, - }, - } - It("handles upserts for an NginxProxy", func() { - processor.CaptureUpsertChange(np) - processor.CaptureUpsertChange(paramGC) + } + It("handles upserts for an NginxProxy", func() { + processor.CaptureUpsertChange(np) + processor.CaptureUpsertChange(paramGC) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy.Source).To(Equal(np)) - }) - It("captures changes for an NginxProxy", func() { - processor.CaptureUpsertChange(npUpdated) - processor.CaptureUpsertChange(paramGC) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(np)) + }) + It("captures changes for an NginxProxy", func() { + processor.CaptureUpsertChange(npUpdated) + processor.CaptureUpsertChange(paramGC) + + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(npUpdated)) + }) + It("handles deletes for an NginxProxy", func() { + processor.CaptureDeleteChange(np, 
client.ObjectKeyFromObject(np)) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy.Source).To(Equal(npUpdated)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + Expect(graph.GatewayClass.NginxProxy).To(BeNil()) + }) }) - It("handles deletes for an NginxProxy", func() { - processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) + Context("referenced by a Gateway", func() { + paramGW := &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "param-gw", + Generation: 1, + }, + Spec: v1.GatewaySpec{ + GatewayClassName: gcName, + Listeners: []v1.Listener{ + { + Name: httpListenerName, + Hostname: nil, + Port: 80, + Protocol: v1.HTTPProtocolType, + }, + }, + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: ngfAPIv1alpha1.GroupName, + Kind: kinds.NginxProxy, + Name: "np-gw", + }, + }, + }, + } + + np := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-gw", + Namespace: "test", + }, + } + + npUpdated := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-gw", + Namespace: "test", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-svc:123"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, + }, + }, + } + It("handles upserts for an NginxProxy", func() { + processor.CaptureUpsertChange(np) + processor.CaptureUpsertChange(paramGW) + + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] + Expect(gw.NginxProxy.Source).To(Equal(np)) + }) + It("captures changes for an NginxProxy", func() { + processor.CaptureUpsertChange(npUpdated) + 
processor.CaptureUpsertChange(paramGW) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy).To(BeNil()) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] + Expect(gw.NginxProxy.Source).To(Equal(npUpdated)) + }) + It("handles deletes for an NginxProxy", func() { + processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) + + graph := processor.Process() + Expect(graph).ToNot(BeNil()) + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] + Expect(gw.NginxProxy).To(BeNil()) + }) }) }) @@ -2434,8 +2953,8 @@ var _ = Describe("ChangeProcessor", func() { BeforeAll(func() { processor.CaptureUpsertChange(gc) - changed, newGraph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + newGraph := processor.Process() + Expect(newGraph).ToNot(BeNil()) Expect(newGraph.GatewayClass.Source).To(Equal(gc)) Expect(newGraph.NGFPolicies).To(BeEmpty()) @@ -2565,29 +3084,28 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(obs) processor.CaptureUpsertChange(usp) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) }) When("the resource the policy references is created", func() { It("populates the graph with the policy", func() { processor.CaptureUpsertChange(gw) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(cspKey)) Expect(graph.NGFPolicies[cspKey].Source).To(Equal(csp)) Expect(graph.NGFPolicies).ToNot(HaveKey(obsKey)) processor.CaptureUpsertChange(route) - changed, graph = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph = processor.Process() + Expect(graph).ToNot(BeNil()) 
Expect(graph.NGFPolicies).To(HaveKey(obsKey)) Expect(graph.NGFPolicies[obsKey].Source).To(Equal(obs)) processor.CaptureUpsertChange(svc) - changed, graph = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph = processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(uspKey)) Expect(graph.NGFPolicies[uspKey].Source).To(Equal(usp)) }) @@ -2598,8 +3116,8 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(obsUpdated) processor.CaptureUpsertChange(uspUpdated) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(cspKey)) Expect(graph.NGFPolicies[cspKey].Source).To(Equal(cspUpdated)) Expect(graph.NGFPolicies).To(HaveKey(obsKey)) @@ -2614,8 +3132,8 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&ngfAPIv1alpha2.ObservabilityPolicy{}, client.ObjectKeyFromObject(obs)) processor.CaptureDeleteChange(&ngfAPIv1alpha1.UpstreamSettingsPolicy{}, client.ObjectKeyFromObject(usp)) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(BeEmpty()) }) }) @@ -2663,8 +3181,8 @@ var _ = Describe("ChangeProcessor", func() { It("handles upserts for a SnippetsFilter", func() { processor.CaptureUpsertChange(sf) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) processedSf, exists := graph.SnippetsFilters[sfNsName] Expect(exists).To(BeTrue()) @@ -2674,8 +3192,8 @@ var _ = Describe("ChangeProcessor", func() { It("captures changes for a SnippetsFilter", func() { processor.CaptureUpsertChange(sfUpdated) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := 
processor.Process() + Expect(graph).ToNot(BeNil()) processedSf, exists := graph.SnippetsFilters[sfNsName] Expect(exists).To(BeTrue()) @@ -2685,8 +3203,8 @@ var _ = Describe("ChangeProcessor", func() { It("handles deletes for a SnippetsFilter", func() { processor.CaptureDeleteChange(sfUpdated, sfNsName) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.SnippetsFilters).To(BeEmpty()) }) }) @@ -2710,7 +3228,7 @@ var _ = Describe("ChangeProcessor", func() { secret, secretUpdated, unrelatedSecret, barSecret, barSecretUpdated *apiv1.Secret cm, cmUpdated, unrelatedCM *apiv1.ConfigMap btls, btlsUpdated *v1alpha3.BackendTLSPolicy - np, npUpdated *ngfAPIv1alpha1.NginxProxy + np, npUpdated *ngfAPIv1alpha2.NginxProxy ) BeforeEach(OncePerOrdered, func() { @@ -3009,19 +3527,19 @@ var _ = Describe("ChangeProcessor", func() { btlsUpdated = btls.DeepCopy() npNsName = types.NamespacedName{Name: "np-1"} - np = &ngfAPIv1alpha1.NginxProxy{ + np = &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ Name: npNsName.Name, }, - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), }, }, } npUpdated = np.DeepCopy() }) - // Changing change - a change that makes processor.Process() report changed + // Changing change - a change that makes processor.Process() return a built graph // Non-changing change - a change that doesn't do that // Related resource - a K8s resource that is related to a configured Gateway API resource // Unrelated resource - a K8s resource that is not related to a configured Gateway API resource @@ -3029,7 +3547,7 @@ var _ = Describe("ChangeProcessor", func() { // Note: in these tests, we deliberately don't fully inspect the returned configuration and statuses // -- this is done in 'Normal cases of 
processing changes' Describe("Multiple Gateway API resource changes", Ordered, func() { - It("should report changed after multiple Upserts", func() { + It("should build graph after multiple Upserts", func() { processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw1) processor.CaptureUpsertChange(testNs) @@ -3040,11 +3558,10 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(cm) processor.CaptureUpsertChange(np) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) When("a upsert of updated resources is followed by an upsert of the same generation", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureUpsertChange(gcUpdated) processor.CaptureUpsertChange(gw1Updated) @@ -3065,22 +3582,20 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(cmUpdated) processor.CaptureUpsertChange(npUpdated) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) - It("should report changed after upserting new resources", func() { + It("should build graph after upserting new resources", func() { // we can't have a second GatewayClass, so we don't add it processor.CaptureUpsertChange(gw2) processor.CaptureUpsertChange(hr2) processor.CaptureUpsertChange(gr2) processor.CaptureUpsertChange(rg2) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) When("resources are deleted followed by upserts with the same generations", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureDeleteChange(&v1.GatewayClass{}, gcNsName) processor.CaptureDeleteChange(&v1.Gateway{}, gwNsName) @@ -3089,7 +3604,7 @@ var _ = 
Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&v1beta1.ReferenceGrant{}, rgNsName) processor.CaptureDeleteChange(&v1alpha3.BackendTLSPolicy{}, btlsNsName) processor.CaptureDeleteChange(&apiv1.ConfigMap{}, cmNsName) - processor.CaptureDeleteChange(&ngfAPIv1alpha1.NginxProxy{}, npNsName) + processor.CaptureDeleteChange(&ngfAPIv1alpha2.NginxProxy{}, npNsName) // these are non-changing changes processor.CaptureUpsertChange(gw2) @@ -3097,20 +3612,18 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gr2) processor.CaptureUpsertChange(rg2) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) - It("should report changed after deleting resources", func() { + It("should build graph after deleting resources", func() { processor.CaptureDeleteChange(&v1.HTTPRoute{}, hr2NsName) processor.CaptureDeleteChange(&v1.HTTPRoute{}, gr2NsName) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) Describe("Deleting non-existing Gateway API resource", func() { - It("should not report changed after deleting non-existing", func() { + It("should not build graph after deleting non-existing", func() { processor.CaptureDeleteChange(&v1.GatewayClass{}, gcNsName) processor.CaptureDeleteChange(&v1.Gateway{}, gwNsName) processor.CaptureDeleteChange(&v1.HTTPRoute{}, hrNsName) @@ -3119,8 +3632,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&v1.HTTPRoute{}, gr2NsName) processor.CaptureDeleteChange(&v1beta1.ReferenceGrant{}, rgNsName) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) }) Describe("Multiple Kubernetes API resource changes", Ordered, func() { @@ -3134,31 +3646,28 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) 
processor.CaptureUpsertChange(barSecret) processor.CaptureUpsertChange(cm) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report changed after multiple Upserts of related resources", func() { + It("should build graph after multiple Upserts of related resources", func() { processor.CaptureUpsertChange(svc) processor.CaptureUpsertChange(slice) processor.CaptureUpsertChange(ns) processor.CaptureUpsertChange(secretUpdated) processor.CaptureUpsertChange(cmUpdated) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report not changed after multiple Upserts of unrelated resources", func() { + It("should not build graph after multiple Upserts of unrelated resources", func() { processor.CaptureUpsertChange(unrelatedSvc) processor.CaptureUpsertChange(unrelatedSlice) processor.CaptureUpsertChange(unrelatedNS) processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) When("upserts of related resources are followed by upserts of unrelated resources", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureUpsertChange(barSvc) processor.CaptureUpsertChange(barSlice) @@ -3173,12 +3682,11 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) When("deletes of related resources are followed by upserts of unrelated resources", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing 
changes processor.CaptureDeleteChange(&apiv1.Service{}, svcNsName) processor.CaptureDeleteChange(&discoveryV1.EndpointSlice{}, sliceNsName) @@ -3193,13 +3701,12 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) }) Describe("Multiple Kubernetes API and Gateway API resource changes", Ordered, func() { - It("should report changed after multiple Upserts of new and related resources", func() { + It("should build graph after multiple Upserts of new and related resources", func() { // new Gateway API resources processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw1) @@ -3216,10 +3723,9 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) processor.CaptureUpsertChange(cm) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report not changed after multiple Upserts of unrelated resources", func() { + It("should not build graph after multiple Upserts of unrelated resources", func() { // unrelated Kubernetes API resources processor.CaptureUpsertChange(unrelatedSvc) processor.CaptureUpsertChange(unrelatedSlice) @@ -3227,10 +3733,9 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) - It("should report changed after upserting changed resources followed by upserting unrelated resources", + It("should build graph after upserting changed resources followed by upserting unrelated resources", func() { // these are changing changes processor.CaptureUpsertChange(gcUpdated) @@ -3247,8 +3752,7 @@ var _ = 
Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }, ) }) diff --git a/internal/mode/static/state/conditions/conditions.go b/internal/mode/static/state/conditions/conditions.go index 84ba2d971a..e864e12103 100644 --- a/internal/mode/static/state/conditions/conditions.go +++ b/internal/mode/static/state/conditions/conditions.go @@ -19,7 +19,7 @@ const ( // ListenerMessageFailedNginxReload is a message used with ListenerConditionProgrammed (false) // when nginx fails to reload. ListenerMessageFailedNginxReload = "The Listener is not programmed due to a failure to " + - "reload nginx with the configuration. Please see the nginx container logs for any possible configuration issues." + "reload nginx with the configuration" // RouteReasonBackendRefUnsupportedValue is used with the "ResolvedRefs" condition when one of the // Route rules has a backendRef with an unsupported value. @@ -53,14 +53,6 @@ const ( // invalid. Used with ResolvedRefs (false). RouteReasonInvalidFilter v1.RouteConditionReason = "InvalidFilter" - // GatewayReasonGatewayConflict indicates there are multiple Gateway resources to choose from, - // and we ignored the resource in question and picked another Gateway as the winner. - // This reason is used with GatewayConditionAccepted (false). - GatewayReasonGatewayConflict v1.GatewayConditionReason = "GatewayConflict" - - // GatewayMessageGatewayConflict is a message that describes GatewayReasonGatewayConflict. - GatewayMessageGatewayConflict = "The resource is ignored due to a conflicting Gateway resource" - // GatewayReasonUnsupportedValue is used with GatewayConditionAccepted (false) when a value of a field in a Gateway // is invalid or not supported. 
GatewayReasonUnsupportedValue v1.GatewayConditionReason = "UnsupportedValue" @@ -68,7 +60,7 @@ const ( // GatewayMessageFailedNginxReload is a message used with GatewayConditionProgrammed (false) // when nginx fails to reload. GatewayMessageFailedNginxReload = "The Gateway is not programmed due to a failure to " + - "reload nginx with the configuration. Please see the nginx container logs for any possible configuration issues" + "reload nginx with the configuration" // RouteMessageFailedNginxReload is a message used with RouteReasonGatewayNotProgrammed // when nginx fails to reload. @@ -87,6 +79,10 @@ const ( // parametersRef resource does not exist. GatewayClassReasonParamsRefNotFound v1.GatewayClassConditionReason = "ParametersRefNotFound" + // GatewayClassReasonParamsRefInvalid is used with the "GatewayClassResolvedRefs" condition when the + // parametersRef resource is invalid. + GatewayClassReasonParamsRefInvalid v1.GatewayClassConditionReason = "ParametersRefInvalid" + // PolicyReasonNginxProxyConfigNotSet is used with the "PolicyAccepted" condition when the // NginxProxy resource is missing or invalid. PolicyReasonNginxProxyConfigNotSet v1alpha2.PolicyConditionReason = "NginxProxyConfigNotSet" @@ -103,21 +99,21 @@ const ( // has an overlapping hostname:port/path combination with another Route. PolicyReasonTargetConflict v1alpha2.PolicyConditionReason = "TargetConflict" - // GatewayIgnoredReason is used with v1.RouteConditionAccepted when the route references a Gateway that is ignored - // by NGF. - GatewayIgnoredReason v1.RouteConditionReason = "GatewayIgnored" -) + // GatewayResolvedRefs condition indicates whether the controller was able to resolve the + // parametersRef on the Gateway. + GatewayResolvedRefs v1.GatewayConditionType = "ResolvedRefs" -// NewRouteNotAcceptedGatewayIgnored returns a Condition that indicates that the Route is not accepted by the Gateway -// because the Gateway is ignored by NGF. 
-func NewRouteNotAcceptedGatewayIgnored() conditions.Condition { - return conditions.Condition{ - Type: string(v1.RouteConditionAccepted), - Status: metav1.ConditionFalse, - Reason: string(GatewayIgnoredReason), - Message: "The Gateway is ignored by the controller", - } -} + // GatewayReasonResolvedRefs is used with the "GatewayResolvedRefs" condition when the condition is true. + GatewayReasonResolvedRefs v1.GatewayConditionReason = "ResolvedRefs" + + // GatewayReasonParamsRefNotFound is used with the "GatewayResolvedRefs" condition when the + // parametersRef resource does not exist. + GatewayReasonParamsRefNotFound v1.GatewayConditionReason = "ParametersRefNotFound" + + // GatewayReasonParamsRefInvalid is used with the "GatewayResolvedRefs" condition when the + // parametersRef resource is invalid. + GatewayReasonParamsRefInvalid v1.GatewayConditionReason = "ParametersRefInvalid" +) // NewDefaultRouteConditions returns the default conditions that must be present in the status of a Route. func NewDefaultRouteConditions() []conditions.Condition { @@ -514,7 +510,7 @@ func NewGatewayClassResolvedRefs() conditions.Condition { Type: string(GatewayClassResolvedRefs), Status: metav1.ConditionTrue, Reason: string(GatewayClassReasonResolvedRefs), - Message: "parametersRef resource is resolved", + Message: "ParametersRef resource is resolved", } } @@ -525,7 +521,18 @@ func NewGatewayClassRefNotFound() conditions.Condition { Type: string(GatewayClassResolvedRefs), Status: metav1.ConditionFalse, Reason: string(GatewayClassReasonParamsRefNotFound), - Message: "parametersRef resource could not be found", + Message: "ParametersRef resource could not be found", + } +} + +// NewGatewayClassRefInvalid returns a Condition that indicates that the parametersRef +// on the GatewayClass could not be resolved because the resource it references is invalid. 
+func NewGatewayClassRefInvalid(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(GatewayClassResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayClassReasonParamsRefInvalid), + Message: msg, } } @@ -537,7 +544,7 @@ func NewGatewayClassInvalidParameters(msg string) conditions.Condition { Type: string(v1.GatewayClassConditionStatusAccepted), Status: metav1.ConditionTrue, Reason: string(v1.GatewayClassReasonInvalidParameters), - Message: fmt.Sprintf("GatewayClass is accepted, but parametersRef is ignored due to an error: %s", msg), + Message: fmt.Sprintf("GatewayClass is accepted, but ParametersRef is ignored due to an error: %s", msg), } } @@ -559,19 +566,6 @@ func NewGatewayAccepted() conditions.Condition { } } -// NewGatewayConflict returns Conditions that indicate the Gateway has a conflict with another Gateway. -func NewGatewayConflict() []conditions.Condition { - return []conditions.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonGatewayConflict), - Message: GatewayMessageGatewayConflict, - }, - NewGatewayConflictNotProgrammed(), - } -} - // NewGatewayAcceptedListenersNotValid returns a Condition that indicates the Gateway is accepted, // but has at least one listener that is invalid. func NewGatewayAcceptedListenersNotValid() conditions.Condition { @@ -653,17 +647,6 @@ func NewGatewayNotProgrammedInvalid(msg string) conditions.Condition { } } -// NewGatewayConflictNotProgrammed returns a custom Programmed Condition that indicates the Gateway has a -// conflict with another Gateway. 
-func NewGatewayConflictNotProgrammed() conditions.Condition { - return conditions.Condition{ - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonGatewayConflict), - Message: GatewayMessageGatewayConflict, - } -} - // NewNginxGatewayValid returns a Condition that indicates that the NginxGateway config is valid. func NewNginxGatewayValid() conditions.Condition { return conditions.Condition{ @@ -684,6 +667,51 @@ func NewNginxGatewayInvalid(msg string) conditions.Condition { } } +// NewGatewayResolvedRefs returns a Condition that indicates that the parametersRef +// on the Gateway is resolved. +func NewGatewayResolvedRefs() conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionTrue, + Reason: string(GatewayReasonResolvedRefs), + Message: "ParametersRef resource is resolved", + } +} + +// NewGatewayRefNotFound returns a Condition that indicates that the parametersRef +// on the Gateway could not be resolved. +func NewGatewayRefNotFound() conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayReasonParamsRefNotFound), + Message: "ParametersRef resource could not be found", + } +} + +// NewGatewayRefInvalid returns a Condition that indicates that the parametersRef +// on the Gateway could not be resolved because the referenced resource is invalid. +func NewGatewayRefInvalid(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayReasonParamsRefInvalid), + Message: msg, + } +} + +// NewGatewayInvalidParameters returns a Condition that indicates that the Gateway has invalid parameters. +// We are allowing Accepted to still be true to prevent nullifying the entire Gateway config if a parametersRef +// is updated to something invalid. 
+func NewGatewayInvalidParameters(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + Reason: string(v1.GatewayReasonInvalidParameters), + Message: fmt.Sprintf("Gateway is accepted, but ParametersRef is ignored due to an error: %s", msg), + } +} + // NewPolicyAccepted returns a Condition that indicates that the Policy is accepted. func NewPolicyAccepted() conditions.Condition { return conditions.Condition{ diff --git a/internal/mode/static/state/dataplane/configuration.go b/internal/mode/static/state/dataplane/configuration.go index 84dc4b0cdb..e0de92d14d 100644 --- a/internal/mode/static/state/dataplane/configuration.go +++ b/internal/mode/static/state/dataplane/configuration.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "slices" "sort" discoveryV1 "k8s.io/api/discovery/v1" @@ -30,26 +31,26 @@ const ( func BuildConfiguration( ctx context.Context, g *graph.Graph, + gateway *graph.Gateway, serviceResolver resolver.ServiceResolver, - configVersion int, plus bool, ) Configuration { - if g.GatewayClass == nil || !g.GatewayClass.Valid || g.Gateway == nil { - config := GetDefaultConfiguration(g, configVersion) + if g.GatewayClass == nil || !g.GatewayClass.Valid || gateway == nil { + config := GetDefaultConfiguration(g, gateway) if plus { - config.NginxPlus = buildNginxPlus(g) + config.NginxPlus = buildNginxPlus(gateway) } return config } - baseHTTPConfig := buildBaseHTTPConfig(g) + baseHTTPConfig := buildBaseHTTPConfig(g, gateway) - httpServers, sslServers := buildServers(g) + httpServers, sslServers := buildServers(gateway) backendGroups := buildBackendGroups(append(httpServers, sslServers...)) upstreams := buildUpstreams( ctx, - g.Gateway.Listeners, + gateway, serviceResolver, g.ReferencedServices, baseHTTPConfig.IPFamily, @@ -57,25 +58,24 @@ func BuildConfiguration( var nginxPlus NginxPlus if plus { - nginxPlus = buildNginxPlus(g) + nginxPlus = 
buildNginxPlus(gateway) } config := Configuration{ HTTPServers: httpServers, SSLServers: sslServers, - TLSPassthroughServers: buildPassthroughServers(g), + TLSPassthroughServers: buildPassthroughServers(gateway), Upstreams: upstreams, - StreamUpstreams: buildStreamUpstreams(ctx, g.Gateway.Listeners, serviceResolver, baseHTTPConfig.IPFamily), + StreamUpstreams: buildStreamUpstreams(ctx, gateway, serviceResolver, baseHTTPConfig.IPFamily), BackendGroups: backendGroups, - SSLKeyPairs: buildSSLKeyPairs(g.ReferencedSecrets, g.Gateway.Listeners), - Version: configVersion, + SSLKeyPairs: buildSSLKeyPairs(g.ReferencedSecrets, gateway.Listeners), CertBundles: buildCertBundles( buildRefCertificateBundles(g.ReferencedSecrets, g.ReferencedCaCertConfigMaps), backendGroups, ), - Telemetry: buildTelemetry(g), + Telemetry: buildTelemetry(g, gateway), BaseHTTPConfig: baseHTTPConfig, - Logging: buildLogging(g), + Logging: buildLogging(gateway), NginxPlus: nginxPlus, MainSnippets: buildSnippetsForContext(g.SnippetsFilters, ngfAPIv1alpha1.NginxContextMain), AuxiliarySecrets: buildAuxiliarySecrets(g.PlusSecrets), @@ -85,13 +85,13 @@ func BuildConfiguration( } // buildPassthroughServers builds TLSPassthroughServers from TLSRoutes attaches to listeners. 
-func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { +func buildPassthroughServers(gateway *graph.Gateway) []Layer4VirtualServer { passthroughServersMap := make(map[graph.L4RouteKey][]Layer4VirtualServer) listenerPassthroughServers := make([]Layer4VirtualServer, 0) passthroughServerCount := 0 - for _, l := range g.Gateway.Listeners { + for _, l := range gateway.Listeners { if !l.Valid || l.Source.Protocol != v1.TLSProtocolType { continue } @@ -104,7 +104,8 @@ func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { var hostnames []string for _, p := range r.ParentRefs { - if val, exist := p.Attachment.AcceptedHostnames[l.Name]; exist { + key := graph.CreateGatewayListenerKey(l.GatewayName, l.Name) + if val, exist := p.Attachment.AcceptedHostnames[key]; exist { hostnames = val break } @@ -156,7 +157,7 @@ func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { // buildStreamUpstreams builds all stream upstreams. func buildStreamUpstreams( ctx context.Context, - listeners []*graph.Listener, + gateway *graph.Gateway, serviceResolver resolver.ServiceResolver, ipFamily IPFamilyType, ) []Upstream { @@ -164,7 +165,7 @@ func buildStreamUpstreams( // We use a map to deduplicate them. 
uniqueUpstreams := make(map[string]Upstream) - for _, l := range listeners { + for _, l := range gateway.Listeners { if !l.Valid || l.Source.Protocol != v1.TLSProtocolType { continue } @@ -180,6 +181,11 @@ func buildStreamUpstreams( continue } + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if _, ok := br.InvalidForGateways[gatewayNSName]; ok { + continue + } + upstreamName := br.ServicePortReference() if _, exist := uniqueUpstreams[upstreamName]; exist { @@ -337,7 +343,12 @@ func buildBackendGroups(servers []VirtualServer) []BackendGroup { return groups } -func newBackendGroup(refs []graph.BackendRef, sourceNsName types.NamespacedName, ruleIdx int) BackendGroup { +func newBackendGroup( + refs []graph.BackendRef, + gatewayName types.NamespacedName, + sourceNsName types.NamespacedName, + ruleIdx int, +) BackendGroup { var backends []Backend if len(refs) > 0 { @@ -349,10 +360,15 @@ func newBackendGroup(refs []graph.BackendRef, sourceNsName types.NamespacedName, continue } + valid := ref.Valid + if _, ok := ref.InvalidForGateways[gatewayName]; ok { + valid = false + } + backends = append(backends, Backend{ UpstreamName: ref.ServicePortReference(), Weight: ref.Weight, - Valid: ref.Valid, + Valid: valid, VerifyTLS: convertBackendTLS(ref.BackendTLSPolicy), }) } @@ -378,13 +394,13 @@ func convertBackendTLS(btp *graph.BackendTLSPolicy) *VerifyTLS { return verify } -func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { +func buildServers(gateway *graph.Gateway) (http, ssl []VirtualServer) { rulesForProtocol := map[v1.ProtocolType]portPathRules{ v1.HTTPProtocolType: make(portPathRules), v1.HTTPSProtocolType: make(portPathRules), } - for _, l := range g.Gateway.Listeners { + for _, l := range gateway.Listeners { if l.Source.Protocol == v1.TLSProtocolType { continue } @@ -395,7 +411,7 @@ func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { rulesForProtocol[l.Source.Protocol][l.Source.Port] = rules } - rules.upsertListener(l) + 
rules.upsertListener(l, gateway) } } @@ -404,7 +420,7 @@ func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { httpServers, sslServers := httpRules.buildServers(), sslRules.buildServers() - pols := buildPolicies(g.Gateway.Policies) + pols := buildPolicies(gateway, gateway.Policies) for i := range httpServers { httpServers[i].Policies = pols @@ -456,7 +472,7 @@ func newHostPathRules() *hostPathRules { } } -func (hpr *hostPathRules) upsertListener(l *graph.Listener) { +func (hpr *hostPathRules) upsertListener(l *graph.Listener, gateway *graph.Gateway) { hpr.listenersExist = true hpr.port = int32(l.Source.Port) @@ -469,13 +485,14 @@ func (hpr *hostPathRules) upsertListener(l *graph.Listener) { continue } - hpr.upsertRoute(r, l) + hpr.upsertRoute(r, l, gateway) } } func (hpr *hostPathRules) upsertRoute( route *graph.L7Route, listener *graph.Listener, + gateway *graph.Gateway, ) { var hostnames []string GRPC := route.RouteType == graph.RouteTypeGRPC @@ -489,7 +506,9 @@ func (hpr *hostPathRules) upsertRoute( } for _, p := range route.ParentRefs { - if val, exist := p.Attachment.AcceptedHostnames[string(listener.Source.Name)]; exist { + key := graph.CreateGatewayListenerKey(listener.GatewayName, listener.Name) + + if val, exist := p.Attachment.AcceptedHostnames[key]; exist { hostnames = val break } @@ -524,7 +543,7 @@ func (hpr *hostPathRules) upsertRoute( } } - pols := buildPolicies(route.Policies) + pols := buildPolicies(gateway, route.Policies) for _, h := range hostnames { for _, m := range rule.Matches { @@ -548,7 +567,7 @@ func (hpr *hostPathRules) upsertRoute( hostRule.MatchRules = append(hostRule.MatchRules, MatchRule{ Source: objectSrc, - BackendGroup: newBackendGroup(rule.BackendRefs, routeNsName, idx), + BackendGroup: newBackendGroup(rule.BackendRefs, listener.GatewayName, routeNsName, idx), Filters: filters, Match: convertMatch(m), }) @@ -645,7 +664,7 @@ func (hpr *hostPathRules) maxServerCount() int { func buildUpstreams( ctx context.Context, - 
listeners []*graph.Listener, + gateway *graph.Gateway, svcResolver resolver.ServiceResolver, referencedServices map[types.NamespacedName]*graph.ReferencedService, ipFamily IPFamilyType, @@ -657,7 +676,7 @@ func buildUpstreams( // We need to build endpoints based on the IPFamily of NGINX. allowedAddressType := getAllowedAddressType(ipFamily) - for _, l := range listeners { + for _, l := range gateway.Listeners { if !l.Valid { continue } @@ -672,33 +691,18 @@ func buildUpstreams( // don't generate upstreams for rules that have invalid matches or filters continue } + for _, br := range rule.BackendRefs { - if br.Valid { - upstreamName := br.ServicePortReference() - _, exist := uniqueUpstreams[upstreamName] - - if exist { - continue - } - - var errMsg string - - eps, err := svcResolver.Resolve(ctx, br.SvcNsName, br.ServicePort, allowedAddressType) - if err != nil { - errMsg = err.Error() - } - - var upstreamPolicies []policies.Policy - if graphSvc, exists := referencedServices[br.SvcNsName]; exists { - upstreamPolicies = buildPolicies(graphSvc.Policies) - } - - uniqueUpstreams[upstreamName] = Upstream{ - Name: upstreamName, - Endpoints: eps, - ErrorMsg: errMsg, - Policies: upstreamPolicies, - } + if upstream := buildUpstream( + ctx, + br, + gateway, + svcResolver, + referencedServices, + uniqueUpstreams, + allowedAddressType, + ); upstream != nil { + uniqueUpstreams[upstream.Name] = *upstream } } } @@ -714,9 +718,60 @@ func buildUpstreams( for _, up := range uniqueUpstreams { upstreams = append(upstreams, up) } + + // Preserve order so that this doesn't trigger an unnecessary reload. 
+ sort.Slice(upstreams, func(i, j int) bool { + return upstreams[i].Name < upstreams[j].Name + }) + return upstreams } +func buildUpstream( + ctx context.Context, + br graph.BackendRef, + gateway *graph.Gateway, + svcResolver resolver.ServiceResolver, + referencedServices map[types.NamespacedName]*graph.ReferencedService, + uniqueUpstreams map[string]Upstream, + allowedAddressType []discoveryV1.AddressType, +) *Upstream { + if !br.Valid { + return nil + } + + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if _, ok := br.InvalidForGateways[gatewayNSName]; ok { + return nil + } + + upstreamName := br.ServicePortReference() + _, exist := uniqueUpstreams[upstreamName] + + if exist { + return nil + } + + var errMsg string + + eps, err := svcResolver.Resolve(ctx, br.SvcNsName, br.ServicePort, allowedAddressType) + if err != nil { + errMsg = err.Error() + } + + var upstreamPolicies []policies.Policy + if graphSvc, exists := referencedServices[br.SvcNsName]; exists { + upstreamPolicies = buildPolicies(gateway, graphSvc.Policies) + } + + return &Upstream{ + Name: upstreamName, + Endpoints: eps, + ErrorMsg: errMsg, + Policies: upstreamPolicies, + } +} + func getAllowedAddressType(ipFamily IPFamilyType) []discoveryV1.AddressType { switch ipFamily { case IPv4: @@ -816,22 +871,42 @@ func generateCertBundleID(caCertRef types.NamespacedName) CertBundleID { return CertBundleID(fmt.Sprintf("cert_bundle_%s_%s", caCertRef.Namespace, caCertRef.Name)) } +func telemetryEnabled(gw *graph.Gateway) bool { + if gw == nil { + return false + } + + if gw.EffectiveNginxProxy == nil || gw.EffectiveNginxProxy.Telemetry == nil { + return false + } + + tel := gw.EffectiveNginxProxy.Telemetry + + if slices.Contains(tel.DisabledFeatures, ngfAPIv1alpha2.DisableTracing) { + return false + } + + if tel.Exporter == nil || tel.Exporter.Endpoint == nil { + return false + } + + return true +} + // buildTelemetry generates the Otel configuration. 
-func buildTelemetry(g *graph.Graph) Telemetry { - if g.NginxProxy == nil || !g.NginxProxy.Valid || - g.NginxProxy.Source.Spec.Telemetry == nil || - g.NginxProxy.Source.Spec.Telemetry.Exporter == nil { +func buildTelemetry(g *graph.Graph, gateway *graph.Gateway) Telemetry { + if !telemetryEnabled(gateway) { return Telemetry{} } - serviceName := fmt.Sprintf("ngf:%s:%s", g.Gateway.Source.Namespace, g.Gateway.Source.Name) - telemetry := g.NginxProxy.Source.Spec.Telemetry + serviceName := fmt.Sprintf("ngf:%s:%s", gateway.Source.Namespace, gateway.Source.Name) + telemetry := gateway.EffectiveNginxProxy.Telemetry if telemetry.ServiceName != nil { serviceName = serviceName + ":" + *telemetry.ServiceName } tel := Telemetry{ - Endpoint: telemetry.Exporter.Endpoint, + Endpoint: *telemetry.Exporter.Endpoint, // safe to deref here since we verified that telemetry is enabled ServiceName: serviceName, } @@ -888,48 +963,51 @@ func CreateRatioVarName(ratio int32) string { } // buildBaseHTTPConfig generates the base http context config that should be applied to all servers. -func buildBaseHTTPConfig(g *graph.Graph) BaseHTTPConfig { +func buildBaseHTTPConfig(g *graph.Graph, gateway *graph.Gateway) BaseHTTPConfig { baseConfig := BaseHTTPConfig{ // HTTP2 should be enabled by default HTTP2: true, IPFamily: Dual, Snippets: buildSnippetsForContext(g.SnippetsFilters, ngfAPIv1alpha1.NginxContextHTTP), } - if g.NginxProxy == nil || !g.NginxProxy.Valid { + + // safe to access EffectiveNginxProxy since we only call this function when the Gateway is not nil. 
+ np := gateway.EffectiveNginxProxy + if np == nil { return baseConfig } - if g.NginxProxy.Source.Spec.DisableHTTP2 { + if np.DisableHTTP2 != nil && *np.DisableHTTP2 { baseConfig.HTTP2 = false } - if g.NginxProxy.Source.Spec.IPFamily != nil { - switch *g.NginxProxy.Source.Spec.IPFamily { - case ngfAPIv1alpha1.IPv4: + if np.IPFamily != nil { + switch *np.IPFamily { + case ngfAPIv1alpha2.IPv4: baseConfig.IPFamily = IPv4 - case ngfAPIv1alpha1.IPv6: + case ngfAPIv1alpha2.IPv6: baseConfig.IPFamily = IPv6 } } - if g.NginxProxy.Source.Spec.RewriteClientIP != nil { - if g.NginxProxy.Source.Spec.RewriteClientIP.Mode != nil { - switch *g.NginxProxy.Source.Spec.RewriteClientIP.Mode { - case ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol: + if np.RewriteClientIP != nil { + if np.RewriteClientIP.Mode != nil { + switch *np.RewriteClientIP.Mode { + case ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol: baseConfig.RewriteClientIPSettings.Mode = RewriteIPModeProxyProtocol - case ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor: + case ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor: baseConfig.RewriteClientIPSettings.Mode = RewriteIPModeXForwardedFor } } - if len(g.NginxProxy.Source.Spec.RewriteClientIP.TrustedAddresses) > 0 { + if len(np.RewriteClientIP.TrustedAddresses) > 0 { baseConfig.RewriteClientIPSettings.TrustedAddresses = convertAddresses( - g.NginxProxy.Source.Spec.RewriteClientIP.TrustedAddresses, + np.RewriteClientIP.TrustedAddresses, ) } - if g.NginxProxy.Source.Spec.RewriteClientIP.SetIPRecursively != nil { - baseConfig.RewriteClientIPSettings.IPRecursive = *g.NginxProxy.Source.Spec.RewriteClientIP.SetIPRecursively + if np.RewriteClientIP.SetIPRecursively != nil { + baseConfig.RewriteClientIPSettings.IPRecursive = *np.RewriteClientIP.SetIPRecursively } } @@ -975,8 +1053,8 @@ func buildSnippetsForContext( return snippetsForContext } -func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { - if len(graphPolicies) == 0 { +func buildPolicies(gateway 
*graph.Gateway, graphPolicies []*graph.Policy) []policies.Policy { + if len(graphPolicies) == 0 || gateway == nil { return nil } @@ -986,6 +1064,9 @@ func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { if !policy.Valid { continue } + if _, exists := policy.InvalidForGateways[client.ObjectKeyFromObject(gateway.Source)]; exists { + continue + } finalPolicies = append(finalPolicies, policy.Source) } @@ -993,7 +1074,7 @@ func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { return finalPolicies } -func convertAddresses(addresses []ngfAPIv1alpha1.RewriteClientIPAddress) []string { +func convertAddresses(addresses []ngfAPIv1alpha2.RewriteClientIPAddress) []string { trustedAddresses := make([]string, len(addresses)) for i, addr := range addresses { trustedAddresses[i] = addr.Value @@ -1001,13 +1082,17 @@ func convertAddresses(addresses []ngfAPIv1alpha1.RewriteClientIPAddress) []strin return trustedAddresses } -func buildLogging(g *graph.Graph) Logging { +func buildLogging(gateway *graph.Gateway) Logging { logSettings := Logging{ErrorLevel: defaultErrorLogLevel} - ngfProxy := g.NginxProxy - if ngfProxy != nil && ngfProxy.Source.Spec.Logging != nil { - if ngfProxy.Source.Spec.Logging.ErrorLevel != nil { - logSettings.ErrorLevel = string(*ngfProxy.Source.Spec.Logging.ErrorLevel) + if gateway == nil || gateway.EffectiveNginxProxy == nil { + return logSettings + } + + ngfProxy := gateway.EffectiveNginxProxy + if ngfProxy.Logging != nil { + if ngfProxy.Logging.ErrorLevel != nil { + logSettings.ErrorLevel = string(*ngfProxy.Logging.ErrorLevel) } } @@ -1028,14 +1113,18 @@ func buildAuxiliarySecrets( return auxSecrets } -func buildNginxPlus(g *graph.Graph) NginxPlus { +func buildNginxPlus(gateway *graph.Gateway) NginxPlus { nginxPlusSettings := NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} - ngfProxy := g.NginxProxy - if ngfProxy != nil && ngfProxy.Source.Spec.NginxPlus != nil { - if ngfProxy.Source.Spec.NginxPlus.AllowedAddresses != nil { - 
addresses := make([]string, 0, len(ngfProxy.Source.Spec.NginxPlus.AllowedAddresses)) - for _, addr := range ngfProxy.Source.Spec.NginxPlus.AllowedAddresses { + if gateway == nil || gateway.EffectiveNginxProxy == nil { + return nginxPlusSettings + } + + ngfProxy := gateway.EffectiveNginxProxy + if ngfProxy.NginxPlus != nil { + if ngfProxy.NginxPlus.AllowedAddresses != nil { + addresses := make([]string, 0, len(ngfProxy.NginxPlus.AllowedAddresses)) + for _, addr := range ngfProxy.NginxPlus.AllowedAddresses { addresses = append(addresses, addr.Value) } @@ -1046,10 +1135,9 @@ func buildNginxPlus(g *graph.Graph) NginxPlus { return nginxPlusSettings } -func GetDefaultConfiguration(g *graph.Graph, configVersion int) Configuration { +func GetDefaultConfiguration(g *graph.Graph, gateway *graph.Gateway) Configuration { return Configuration{ - Version: configVersion, - Logging: buildLogging(g), + Logging: buildLogging(gateway), NginxPlus: NginxPlus{}, AuxiliarySecrets: buildAuxiliarySecrets(g.PlusSecrets), } diff --git a/internal/mode/static/state/dataplane/configuration_test.go b/internal/mode/static/state/dataplane/configuration_test.go index b13a7845a5..78e4869c59 100644 --- a/internal/mode/static/state/dataplane/configuration_test.go +++ b/internal/mode/static/state/dataplane/configuration_test.go @@ -21,6 +21,7 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" @@ -70,15 +71,27 @@ func getExpectedConfiguration() Configuration { } } +var gatewayNsName = types.NamespacedName{ + Namespace: "test", + Name: "gateway", +} + func getNormalGraph() *graph.Graph { return &graph.Graph{ GatewayClass: 
&graph.GatewayClass{ Source: &v1.GatewayClass{}, Valid: true, }, - Gateway: &graph.Gateway{ - Source: &v1.Gateway{}, - Listeners: []*graph.Listener{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + gatewayNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Listeners: []*graph.Listener{}, + }, }, Routes: map[graph.RouteKey]*graph.L7Route{}, ReferencedSecrets: map[types.NamespacedName]*graph.Secret{}, @@ -255,9 +268,12 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, ParentRefs: []graph.ParentRef{ { + Gateway: &graph.ParentRefGateway{ + NamespacedName: gatewayNsName, + }, Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - listenerName: hostnames, + graph.CreateGatewayListenerKey(gatewayNsName, listenerName): hostnames, }, }, }, @@ -473,7 +489,8 @@ func TestBuildConfiguration(t *testing.T) { pathAndType{path: "/", pathType: prefix}, ) // add extra attachment for this route for duplicate listener test - httpsRouteHR5.ParentRefs[0].Attachment.AcceptedHostnames["listener-443-1"] = []string{"example.com"} + key := graph.CreateGatewayListenerKey(gatewayNsName, "listener-443-1") + httpsRouteHR5.ParentRefs[0].Attachment.AcceptedHostnames[key] = []string{"example.com"} httpsHR6, expHTTPSHR6Groups, httpsRouteHR6 := createTestResources( "https-hr-6", @@ -506,14 +523,14 @@ func TestBuildConfiguration(t *testing.T) { { Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443-2": {"app.example.com"}, + graph.CreateGatewayListenerKey(gatewayNsName, "listener-443-2"): {"app.example.com"}, }, }, }, { Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-444-3": {"app.example.com"}, + graph.CreateGatewayListenerKey(gatewayNsName, "listener-444-3"): {"app.example.com"}, }, }, }, @@ -901,43 +918,26 @@ func TestBuildConfiguration(t *testing.T) { }, } - nginxProxy := 
&graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-otel.svc:4563", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), - }, - ServiceName: helpers.GetPointer("my-svc"), - }, - DisableHTTP2: true, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.Dual), + nginxProxy := &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), }, + ServiceName: helpers.GetPointer("my-svc"), }, - Valid: true, + DisableHTTP2: helpers.GetPointer(true), + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), } - nginxProxyIPv4 := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{}, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.IPv4), - }, - }, - Valid: true, + nginxProxyIPv4 := &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv4), } - nginxProxyIPv6 := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{}, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.IPv6), - }, - }, - Valid: true, + nginxProxyIPv6 := &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), } defaultConfig := Configuration{ @@ -962,10 +962,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := 
g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, }) return g }), @@ -978,19 +980,22 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr1Invalid): routeHR1Invalid, }, }, { - Name: "listener-443-1", - Source: listener443, // nil hostname - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, // nil hostname + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR1Invalid): httpsRouteHR1Invalid, }, @@ -1017,9 +1022,11 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { Name: "listener-443-1", + GatewayName: gatewayNsName, Source: listener443, // nil hostname Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1027,6 +1034,7 @@ func TestBuildConfiguration(t *testing.T) { }, { Name: "listener-443-with-hostname", + GatewayName: gatewayNsName, Source: listener443WithHostname, // non-nil hostname Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1063,8 +1071,10 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = 
append(gw.Listeners, &graph.Listener{ Name: "invalid-listener", + GatewayName: gatewayNsName, Source: invalidListener, Valid: false, ResolvedSecret: &secret1NsName, @@ -1086,10 +1096,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr1): routeHR1, graph.CreateRouteKey(hr2): routeHR2, @@ -1147,10 +1159,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(gr): routeGR, }, @@ -1187,11 +1201,13 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR1): httpsRouteHR1, graph.CreateRouteKey(httpsHR2): httpsRouteHR2, @@ -1199,9 +1215,10 @@ func TestBuildConfiguration(t *testing.T) { ResolvedSecret: &secret1NsName, }, { - Name: "listener-443-with-hostname", - Source: 
listener443WithHostname, - Valid: true, + Name: "listener-443-with-hostname", + GatewayName: gatewayNsName, + Source: listener443WithHostname, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, @@ -1297,20 +1314,23 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr3): routeHR3, graph.CreateRouteKey(hr4): routeHR4, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR3): httpsRouteHR3, graph.CreateRouteKey(httpsHR4): httpsRouteHR4, @@ -1437,36 +1457,41 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr3): routeHR3, }, }, { - Name: "listener-8080", - Source: listener8080, - Valid: true, + Name: "listener-8080", + GatewayName: gatewayNsName, + Source: listener8080, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr8): routeHR8, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + 
GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR3): httpsRouteHR3, }, ResolvedSecret: &secret1NsName, }, { - Name: "listener-8443", - Source: listener8443, - Valid: true, + Name: "listener-8443", + GatewayName: gatewayNsName, + Source: listener8443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR7): httpsRouteHR7, }, @@ -1646,7 +1671,7 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway = nil + delete(g.Gateways, gatewayNsName) return g }), expConf: defaultConfig, @@ -1654,10 +1679,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr5): routeHR5, }, @@ -1713,29 +1740,33 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr6): routeHR6, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR6): httpsRouteHR6, }, 
ResolvedSecret: &secret1NsName, }, { - Name: "listener-443-2", - Source: listener443_2, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + Name: "listener-443-2", + GatewayName: gatewayNsName, + Source: listener443_2, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{ TR1Key: &tlsTR1, TR2Key: &invalidBackendRefTR2, @@ -1743,10 +1774,11 @@ func TestBuildConfiguration(t *testing.T) { ResolvedSecret: &secret1NsName, }, { - Name: "listener-444-3", - Source: listener444_3, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + Name: "listener-444-3", + GatewayName: gatewayNsName, + Source: listener444_3, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{ TR1Key: &tlsTR1, TR2Key: &invalidBackendRefTR2, @@ -1755,6 +1787,7 @@ func TestBuildConfiguration(t *testing.T) { }, { Name: "listener-443-4", + GatewayName: gatewayNsName, Source: listener443_4, Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1857,10 +1890,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr7): routeHR7, }, @@ -1909,20 +1944,23 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-443-with-hostname", - Source: listener443WithHostname, - Valid: true, + Name: 
"listener-443-with-hostname", + GatewayName: gatewayNsName, + Source: listener443WithHostname, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, ResolvedSecret: &secret2NsName, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, @@ -1987,10 +2025,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-443", - Source: listener443, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR8): httpsRouteHR8, }, @@ -2046,10 +2086,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-443", - Source: listener443, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR9): httpsRouteHR9, }, @@ -2105,10 +2147,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: 
listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithMirror): routeHRWithMirror, }, @@ -2155,17 +2199,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxy + gw.EffectiveNginxProxy = nginxProxy return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2183,65 +2229,33 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: false, IPFamily: Dual} return conf }), - msg: "NginxProxy with tracing config and http2 disabled", - }, - { - graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", - } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, - }) - g.NginxProxy = &graph.NginxProxy{ - Valid: false, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - DisableHTTP2: true, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.Dual), - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "some-endpoint", - }, - }, - }, - }, - } - return g - }), - expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { - conf.SSLServers = []VirtualServer{} - 
conf.SSLKeyPairs = map[SSLKeyPairID]SSLKeyPair{} - return conf - }), - msg: "invalid NginxProxy", + msg: "EffectiveNginxProxy with tracing config and http2 disabled", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithPolicy): l7RouteWithPolicy, }, }, { - Name: "listener-443", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHRWithPolicy): l7HTTPSRouteWithPolicy, }, ResolvedSecret: &secret1NsName, }, }...) - g.Gateway.Policies = []*graph.Policy{gwPolicy1, gwPolicy2} + gw.Policies = []*graph.Policy{gwPolicy1, gwPolicy2} g.Routes = map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithPolicy): l7RouteWithPolicy, graph.CreateRouteKey(httpsHRWithPolicy): l7HTTPSRouteWithPolicy, @@ -2317,17 +2331,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxyIPv4 + gw.EffectiveNginxProxy = nginxProxyIPv4 return g }), 
expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2336,21 +2352,23 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: true, IPFamily: IPv4} return conf }), - msg: "NginxProxy with IPv4 IPFamily and no routes", + msg: "GatewayClass has NginxProxy with IPv4 IPFamily and no routes", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxyIPv6 + gw.EffectiveNginxProxy = nginxProxyIPv6 return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2359,35 +2377,32 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: true, IPFamily: IPv6} return conf }), - msg: "NginxProxy with IPv6 IPFamily and no routes", + msg: "GatewayClass has NginxProxy with IPv6 IPFamily and no routes", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, 
}) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "1.1.1.1/32", - }, - }, - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol), + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + SetIPRecursively: helpers.GetPointer(true), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "1.1.1.1/32", }, }, + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, } return g @@ -2406,26 +2421,25 @@ func TestBuildConfiguration(t *testing.T) { } return conf }), - msg: "NginxProxy with rewriteClientIP details set", + msg: "GatewayClass has NginxProxy with rewriteClientIP details set", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelDebug)}, - }, + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ 
+ ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, } return g @@ -2436,7 +2450,7 @@ func TestBuildConfiguration(t *testing.T) { conf.Logging = Logging{ErrorLevel: "debug"} return conf }), - msg: "NginxProxy with error log level set to debug", + msg: "GatewayClass has NginxProxy with error log level set to debug", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2476,26 +2490,23 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, - }, + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, } @@ -2518,8 +2529,8 @@ func TestBuildConfiguration(t *testing.T) { result := BuildConfiguration( context.TODO(), test.graph, + test.graph.Gateways[gatewayNsName], fakeResolver, - 1, false, ) @@ -2529,7 +2540,6 @@ func 
TestBuildConfiguration(t *testing.T) { g.Expect(result.SSLServers).To(ConsistOf(test.expConf.SSLServers)) g.Expect(result.TLSPassthroughServers).To(ConsistOf(test.expConf.TLSPassthroughServers)) g.Expect(result.SSLKeyPairs).To(Equal(test.expConf.SSLKeyPairs)) - g.Expect(result.Version).To(Equal(1)) g.Expect(result.CertBundles).To(Equal(test.expConf.CertBundles)) g.Expect(result.Telemetry).To(Equal(test.expConf.Telemetry)) g.Expect(result.BaseHTTPConfig).To(Equal(test.expConf.BaseHTTPConfig)) @@ -2570,26 +2580,23 @@ func TestBuildConfiguration_Plus(t *testing.T) { }{ { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, - }, + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, } @@ -2621,7 +2628,7 @@ func TestBuildConfiguration_Plus(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph 
{ - g.Gateway = nil + delete(g.Gateways, gatewayNsName) return g }), expConf: defaultPlusConfig, @@ -2637,8 +2644,8 @@ func TestBuildConfiguration_Plus(t *testing.T) { result := BuildConfiguration( context.TODO(), test.graph, + test.graph.Gateways[gatewayNsName], fakeResolver, - 1, true, ) @@ -2648,7 +2655,6 @@ func TestBuildConfiguration_Plus(t *testing.T) { g.Expect(result.SSLServers).To(ConsistOf(test.expConf.SSLServers)) g.Expect(result.TLSPassthroughServers).To(ConsistOf(test.expConf.TLSPassthroughServers)) g.Expect(result.SSLKeyPairs).To(Equal(test.expConf.SSLKeyPairs)) - g.Expect(result.Version).To(Equal(1)) g.Expect(result.CertBundles).To(Equal(test.expConf.CertBundles)) g.Expect(result.Telemetry).To(Equal(test.expConf.Telemetry)) g.Expect(result.BaseHTTPConfig).To(Equal(test.expConf.BaseHTTPConfig)) @@ -2669,7 +2675,7 @@ func TestNewBackendGroup_Mirror(t *testing.T) { IsMirrorBackend: true, } - group := newBackendGroup([]graph.BackendRef{backendRef}, types.NamespacedName{}, 0) + group := newBackendGroup([]graph.BackendRef{backendRef}, types.NamespacedName{}, types.NamespacedName{}, 0) g.Expect(group.Backends).To(BeEmpty()) } @@ -3081,6 +3087,13 @@ func TestBuildUpstreams(t *testing.T) { }, } + invalidEndpoints := []resolver.Endpoint{ + { + Address: "11.5.5.5", + Port: 80, + }, + } + bazEndpoints := []resolver.Endpoint{ { Address: "12.0.0.0", @@ -3144,6 +3157,11 @@ func TestBuildUpstreams(t *testing.T) { hr1Refs1 := createBackendRefs("baz", "", "") // empty service names should be ignored + hr1Refs2 := createBackendRefs("invalid-for-gateway") + hr1Refs2[0].InvalidForGateways = map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: {}, + } + hr2Refs0 := createBackendRefs("foo", "baz") // shouldn't duplicate foo and baz upstream hr2Refs1 := createBackendRefs("nil-endpoints") @@ -3166,7 +3184,7 @@ func TestBuildUpstreams(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "hr1", Namespace: "test"}}: { Valid: true, Spec: 
graph.L7RouteSpec{ - Rules: refsToValidRules(hr1Refs0, hr1Refs1), + Rules: refsToValidRules(hr1Refs0, hr1Refs1, hr1Refs2), }, }, {NamespacedName: types.NamespacedName{Name: "hr2", Namespace: "test"}}: { @@ -3228,36 +3246,44 @@ func TestBuildUpstreams(t *testing.T) { }, } - listeners := []*graph.Listener{ - { - Name: "invalid-listener", - Valid: false, - Routes: routesWithNonExistingRefs, // shouldn't be included since listener is invalid - }, - { - Name: "listener-1", - Valid: true, - Routes: routes, - }, - { - Name: "listener-2", - Valid: true, - Routes: routes2, - }, - { - Name: "listener-3", - Valid: true, - Routes: invalidRoutes, // shouldn't be included since routes are invalid - }, - { - Name: "listener-4", - Valid: true, - Routes: routes3, + gateway := &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, }, - { - Name: "listener-5", - Valid: true, - Routes: routesWithPolicies, + Listeners: []*graph.Listener{ + { + Name: "invalid-listener", + Valid: false, + Routes: routesWithNonExistingRefs, // shouldn't be included since listener is invalid + }, + { + Name: "listener-1", + Valid: true, + Routes: routes, + }, + { + Name: "listener-2", + Valid: true, + Routes: routes2, + }, + { + Name: "listener-3", + Valid: true, + Routes: invalidRoutes, // shouldn't be included since routes are invalid + }, + { + Name: "listener-4", + Valid: true, + Routes: routes3, + }, + { + Name: "listener-5", + Valid: true, + Routes: routesWithPolicies, + }, }, } @@ -3266,13 +3292,14 @@ func TestBuildUpstreams(t *testing.T) { invalidPolicy := &policiesfakes.FakePolicy{} referencedServices := map[types.NamespacedName]*graph.ReferencedService{ - {Name: "bar", Namespace: "test"}: {}, - {Name: "baz", Namespace: "test"}: {}, - {Name: "baz2", Namespace: "test"}: {}, - {Name: "foo", Namespace: "test"}: {}, - {Name: "empty-endpoints", Namespace: "test"}: {}, - {Name: "nil-endpoints", Namespace: "test"}: {}, - {Name: 
"ipv6-endpoints", Namespace: "test"}: {}, + {Name: "bar", Namespace: "test"}: {}, + {Name: "invalid-for-gateway", Namespace: "test"}: {}, + {Name: "baz", Namespace: "test"}: {}, + {Name: "baz2", Namespace: "test"}: {}, + {Name: "foo", Namespace: "test"}: {}, + {Name: "empty-endpoints", Namespace: "test"}: {}, + {Name: "nil-endpoints", Namespace: "test"}: {}, + {Name: "ipv6-endpoints", Namespace: "test"}: {}, {Name: "policies", Namespace: "test"}: { Policies: []*graph.Policy{ { @@ -3342,6 +3369,8 @@ func TestBuildUpstreams(t *testing.T) { switch svcNsName.Name { case "bar": return barEndpoints, nil + case "invalid-for-gateway": + return invalidEndpoints, nil case "baz": return bazEndpoints, nil case "baz2": @@ -3365,7 +3394,7 @@ func TestBuildUpstreams(t *testing.T) { g := NewWithT(t) - upstreams := buildUpstreams(context.TODO(), listeners, fakeResolver, referencedServices, Dual) + upstreams := buildUpstreams(context.TODO(), gateway, fakeResolver, referencedServices, Dual) g.Expect(upstreams).To(ConsistOf(expUpstreams)) } @@ -3593,24 +3622,19 @@ func TestConvertBackendTLS(t *testing.T) { func TestBuildTelemetry(t *testing.T) { t.Parallel() - telemetryConfigured := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-otel.svc:4563", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), - }, - ServiceName: helpers.GetPointer("my-svc"), - SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ - {Key: "key", Value: "value"}, - }, - }, + telemetryConfigured := &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: 
helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, }, }, - Valid: true, } createTelemetry := func() Telemetry { @@ -3636,10 +3660,26 @@ func TestBuildTelemetry(t *testing.T) { msg string expTelemetry Telemetry }{ + { + g: &graph.Graph{}, + expTelemetry: Telemetry{}, + msg: "nil Gateway", + }, + { + g: &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: nil, + }, + }, + }, + expTelemetry: Telemetry{}, + msg: "nil effective NginxProxy", + }, { g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: {EffectiveNginxProxy: &graph.EffectiveNginxProxy{}}, }, }, expTelemetry: Telemetry{}, @@ -3647,46 +3687,86 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + }, + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ + ngfAPIv1alpha2.DisableTracing, + }, + }, + }, + }, + }, + }, + expTelemetry: Telemetry{}, + msg: "Telemetry disabled explicitly", + }, + { + g: &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: nil, + }, + }, + }, + }, + }, + expTelemetry: Telemetry{}, + msg: "Telemetry disabled implicitly (nil exporter)", + }, + { + g: &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + 
EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: nil, + }, }, }, }, - Valid: false, }, }, expTelemetry: Telemetry{}, - msg: "Invalid NginxProxy configured", + msg: "Telemetry disabled implicitly (nil exporter endpoint)", }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, }, - NginxProxy: telemetryConfigured, }, expTelemetry: createTelemetry(), msg: "Telemetry configured", }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -3713,15 +3793,17 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -3783,15 +3865,17 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - 
Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -3817,7 +3901,7 @@ func TestBuildTelemetry(t *testing.T) { t.Run(tc.msg, func(t *testing.T) { t.Parallel() g := NewWithT(t) - tel := buildTelemetry(tc.g) + tel := buildTelemetry(tc.g, tc.g.Gateways[types.NamespacedName{}]) sort.Slice(tel.Ratios, func(i, j int) bool { return tel.Ratios[i].Value < tel.Ratios[j].Value }) @@ -3850,6 +3934,7 @@ func TestBuildPolicies(t *testing.T) { tests := []struct { name string + gateway *graph.Gateway policies []*graph.Policy expPolicies []string }{ @@ -3862,24 +3947,37 @@ func TestBuildPolicies(t *testing.T) { name: "mix of valid and invalid policies", policies: []*graph.Policy{ { - Source: getPolicy("Kind1", "valid1"), - Valid: true, + Source: getPolicy("Kind1", "valid1"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind2", "valid2"), - Valid: true, + Source: getPolicy("Kind2", "valid2"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind1", "invalid1"), - Valid: false, + Source: getPolicy("Kind1", "invalid1"), + Valid: false, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind2", "invalid2"), - Valid: false, + Source: getPolicy("Kind2", "invalid2"), + Valid: false, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind3", "valid3"), - Valid: true, + Source: getPolicy("Kind3", "valid3"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + }, + gateway: 
&graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, }, }, expPolicies: []string{ @@ -3888,6 +3986,27 @@ func TestBuildPolicies(t *testing.T) { "valid3", }, }, + { + name: "invalid for a Gateway", + policies: []*graph.Policy{ + { + Source: getPolicy("Kind1", "valid1"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway"}: {}, + }, + }, + }, + gateway: &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, + }, + }, + expPolicies: nil, + }, } for _, test := range tests { @@ -3895,7 +4014,7 @@ func TestBuildPolicies(t *testing.T) { t.Parallel() g := NewWithT(t) - pols := buildPolicies(test.policies) + pols := buildPolicies(test.gateway, test.policies) g.Expect(pols).To(HaveLen(len(test.expPolicies))) for _, pol := range pols { g.Expect(test.expPolicies).To(ContainElement(pol.GetName())) @@ -3961,97 +4080,107 @@ func TestCreatePassthroughServers(t *testing.T) { secureAppKey := getL4RouteKey("secure-app") secureApp2Key := getL4RouteKey("secure-app2") secureApp3Key := getL4RouteKey("secure-app3") - testGraph := graph.Graph{ - Gateway: &graph.Gateway{ - Listeners: []*graph.Listener{ - { - Name: "testingListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - Hostname: helpers.GetPointer[v1.Hostname]("*.example.com"), - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureAppKey: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + gateway := &graph.Gateway{ + Listeners: []*graph.Listener{ + { 
+ Name: "testingListener", + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + Hostname: helpers.GetPointer[v1.Hostname]("*.example.com"), + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureAppKey: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, - ParentRefs: []graph.ParentRef{ - { - Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{ - "testingListener": {"app.example.com", "cafe.example.com"}, - }, + }, + ParentRefs: []graph.ParentRef{ + { + Attachment: &graph.ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + gatewayNsName, + "testingListener", + ): {"app.example.com", "cafe.example.com"}, }, - SectionName: nil, - Port: nil, - Gateway: types.NamespacedName{}, - Idx: 0, }, + SectionName: nil, + Port: nil, + Gateway: &graph.ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, + Idx: 0, }, }, - secureApp2Key: {}, }, + secureApp2Key: {}, }, - { - Name: "testingListener2", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - Hostname: helpers.GetPointer[v1.Hostname]("cafe.example.com"), - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureApp3Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - 
ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + }, + { + Name: "testingListener2", + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + Hostname: helpers.GetPointer[v1.Hostname]("cafe.example.com"), + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureApp3Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, }, }, }, - { - Name: "httpListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.HTTPProtocolType, - }, + }, + { + Name: "httpListener", + Valid: true, + Source: v1.Listener{ + Protocol: v1.HTTPProtocolType, }, }, }, } - passthroughServers := buildPassthroughServers(&testGraph) + passthroughServers := buildPassthroughServers(gateway) expectedPassthroughServers := []Layer4VirtualServer{ { @@ -4100,79 +4229,107 @@ func TestBuildStreamUpstreams(t *testing.T) { secureApp3Key := getL4RouteKey("secure-app3") secureApp4Key := getL4RouteKey("secure-app4") secureApp5Key := getL4RouteKey("secure-app5") - testGraph := graph.Graph{ - Gateway: &graph.Gateway{ - Listeners: []*graph.Listener{ - { - Name: "testingListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureAppKey: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: 
"https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + secureApp6Key := getL4RouteKey("secure-app6") + + gateway := &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Listeners: []*graph.Listener{ + { + Name: "testingListener", + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureAppKey: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, }, - secureApp2Key: {}, - secureApp3Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"test.example.com"}, - BackendRef: graph.BackendRef{}, + }, + secureApp2Key: {}, + secureApp3Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"test.example.com"}, + BackendRef: graph.BackendRef{}, + }, + }, + secureApp4Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, + }, + }, }, }, - secureApp4Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ 
- Type: intstr.Int, - IntVal: 8443, - }, + }, + secureApp5Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app2.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureApp5Key.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, }, - secureApp5Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app2.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureApp5Key.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + }, + secureApp6Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app2.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: {}, + }, + SvcNsName: secureApp6Key.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, @@ -4200,7 +4357,7 @@ func TestBuildStreamUpstreams(t *testing.T) { return fakeEndpoints, nil } - streamUpstreams := buildStreamUpstreams(context.Background(), testGraph.Gateway.Listeners, &fakeResolver, Dual) + streamUpstreams := buildStreamUpstreams(context.Background(), gateway, &fakeResolver, Dual) expectedStreamUpstreams := []Upstream{ { @@ -4227,9 +4384,10 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "no rewrite IP settings configured", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, + }, }, }, expRewriteIPSettings: RewriteClientIPSettings{}, @@ 
-4237,15 +4395,14 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with proxyProtocol", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.9.9.4/32", }, }, @@ -4264,15 +4421,14 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with xForwardedFor", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "76.89.90.11/24", }, }, @@ -4291,27 +4447,26 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with recursive set to false and multiple trusted addresses", g: 
&graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "5.5.5.5/12", }, { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "1.1.1.1/26", }, { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2.2.2.2/32", }, { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "3.3.3.3/24", }, }, @@ -4333,7 +4488,7 @@ func TestBuildRewriteIPSettings(t *testing.T) { t.Run(tc.msg, func(t *testing.T) { t.Parallel() g := NewWithT(t) - baseConfig := buildBaseHTTPConfig(tc.g) + baseConfig := buildBaseHTTPConfig(tc.g, tc.g.Gateways[types.NamespacedName{}]) g.Expect(baseConfig.RewriteClientIPSettings).To(Equal(tc.expRewriteIPSettings)) }) } @@ -4345,133 +4500,113 @@ func TestBuildLogging(t *testing.T) { t.Parallel() tests := []struct { msg string - g *graph.Graph + gw *graph.Gateway expLoggingSettings Logging }{ { - msg: "NginxProxy is nil", - g: &graph.Graph{}, + msg: "Gateway is nil", + gw: nil, expLoggingSettings: defaultLogging, }, { - msg: "NginxProxy does not specify log level", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: 
ngfAPIv1alpha1.NginxProxySpec{}, - }, + msg: "Gateway has no effective NginxProxy", + gw: &graph.Gateway{ + EffectiveNginxProxy: nil, + }, + expLoggingSettings: defaultLogging, + }, + { + msg: "Effective NginxProxy does not specify log level", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, }, expLoggingSettings: defaultLogging, }, { - msg: "NginxProxy log level set to debug", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelDebug)}, - }, + msg: "Effective NginxProxy log level set to debug", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, }, }, expLoggingSettings: Logging{ErrorLevel: "debug"}, }, { - msg: "NginxProxy log level set to info", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelInfo)}, - }, + msg: "Effective NginxProxy log level set to info", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), }, }, }, expLoggingSettings: Logging{ErrorLevel: defaultErrorLogLevel}, }, { - msg: "NginxProxy log level set to notice", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelNotice)}, - }, + msg: "Effective NginxProxy log level set to notice", + gw: 
&graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), }, }, }, expLoggingSettings: Logging{ErrorLevel: "notice"}, }, { - msg: "NginxProxy log level set to warn", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelWarn)}, - }, + msg: "Effective NginxProxy log level set to warn", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), }, }, }, expLoggingSettings: Logging{ErrorLevel: "warn"}, }, { - msg: "NginxProxy log level set to error", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelError)}, - }, + msg: "Effective NginxProxy log level set to error", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, }, expLoggingSettings: Logging{ErrorLevel: "error"}, }, { - msg: "NginxProxy log level set to crit", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelCrit)}, - }, + msg: "Effective NginxProxy log level set to crit", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), }, }, }, 
expLoggingSettings: Logging{ErrorLevel: "crit"}, }, { - msg: "NginxProxy log level set to alert", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelAlert)}, - }, + msg: "Effective NginxProxy log level set to alert", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), }, }, }, expLoggingSettings: Logging{ErrorLevel: "alert"}, }, { - msg: "NginxProxy log level set to emerg", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelEmerg)}, - }, + msg: "Effective NginxProxy log level set to emerg", + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), }, }, }, @@ -4484,7 +4619,7 @@ func TestBuildLogging(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildLogging(tc.g)).To(Equal(tc.expLoggingSettings)) + g.Expect(buildLogging(tc.gw)).To(Equal(tc.expLoggingSettings)) }) } } @@ -4705,38 +4840,28 @@ func TestBuildNginxPlus(t *testing.T) { t.Parallel() tests := []struct { msg string - g *graph.Graph + gw *graph.Gateway expNginxPlus NginxPlus }{ { msg: "NginxProxy is nil", - g: &graph.Graph{}, + gw: &graph.Gateway{}, expNginxPlus: defaultNginxPlus, }, { msg: "NginxPlus default values are used when NginxProxy doesn't specify NginxPlus settings", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{}, - }, - }, + gw: &graph.Gateway{ + 
EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, }, expNginxPlus: defaultNginxPlus, }, { msg: "NginxProxy specifies one allowed address", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - }, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, }, }, }, @@ -4745,17 +4870,12 @@ func TestBuildNginxPlus(t *testing.T) { }, { msg: "NginxProxy specifies multiple allowed addresses", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, }, @@ -4764,16 +4884,11 @@ func TestBuildNginxPlus(t *testing.T) { }, { msg: "NginxProxy specifies 127.0.0.1 as allowed address", - g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: 
ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, - }, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, }, }, }, @@ -4787,7 +4902,7 @@ func TestBuildNginxPlus(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildNginxPlus(tc.g)).To(Equal(tc.expNginxPlus)) + g.Expect(buildNginxPlus(tc.gw)).To(Equal(tc.expNginxPlus)) }) } } diff --git a/internal/mode/static/state/dataplane/types.go b/internal/mode/static/state/dataplane/types.go index 55e5476f51..975edb4c33 100644 --- a/internal/mode/static/state/dataplane/types.go +++ b/internal/mode/static/state/dataplane/types.go @@ -54,8 +54,6 @@ type Configuration struct { NginxPlus NginxPlus // BaseHTTPConfig holds the configuration options at the http context. BaseHTTPConfig BaseHTTPConfig - // Version represents the version of the generated configuration. - Version int } // SSLKeyPairID is a unique identifier for a SSLKeyPair. diff --git a/internal/mode/static/state/graph/backend_refs.go b/internal/mode/static/state/graph/backend_refs.go index ad676f59c5..cf9339cc7a 100644 --- a/internal/mode/static/state/graph/backend_refs.go +++ b/internal/mode/static/state/graph/backend_refs.go @@ -11,8 +11,7 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha3" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" - + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" @@ -23,6 +22,10 @@ import ( type BackendRef struct { // BackendTLSPolicy is the BackendTLSPolicy of the Service which is referenced by the backendRef. 
BackendTLSPolicy *BackendTLSPolicy + // InvalidForGateways is a map of Gateways for which this BackendRef is invalid for, with the corresponding + // condition. Certain NginxProxy configurations may result in a backend not being valid for some Gateways, + // but not others. + InvalidForGateways map[types.NamespacedName]conditions.Condition // SvcNsName is the NamespacedName of the Service referenced by the backendRef. SvcNsName types.NamespacedName // ServicePort is the ServicePort of the Service which is referenced by the backendRef. @@ -49,10 +52,9 @@ func addBackendRefsToRouteRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, ) { for _, r := range routes { - addBackendRefsToRules(r, refGrantResolver, services, backendTLSPolicies, npCfg) + addBackendRefsToRules(r, refGrantResolver, services, backendTLSPolicies) } } @@ -63,7 +65,6 @@ func addBackendRefsToRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, ) { if !route.Valid { return @@ -92,19 +93,18 @@ func addBackendRefsToRules( } routeNs := route.Source.GetNamespace() - ref, cond := createBackendRef( + ref, conds := createBackendRef( ref, - routeNs, + route, refGrantResolver.refAllowedFrom(getRefGrantFromResourceForRoute(route.RouteType, routeNs)), services, refPath, backendTLSPolicies, - npCfg, ) backendRefs = append(backendRefs, ref) - if cond != nil { - route.Conditions = append(route.Conditions, *cond) + if len(conds) > 0 { + route.Conditions = append(route.Conditions, conds...) 
} } @@ -124,13 +124,12 @@ func addBackendRefsToRules( func createBackendRef( ref RouteBackendRef, - sourceNamespace string, + route *L7Route, refGrantResolver func(resource toResource) bool, services map[types.NamespacedName]*v1.Service, refPath *field.Path, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, -) (BackendRef, *conditions.Condition) { +) (BackendRef, []conditions.Condition) { // Data plane will handle invalid ref by responding with 500. // Because of that, we always need to add a BackendRef to group.Backends, even if the ref is invalid. // Additionally, we always calculate the weight, even if it is invalid. @@ -144,80 +143,75 @@ func createBackendRef( } } - var backendRef BackendRef - - valid, cond := validateRouteBackendRef(ref, sourceNamespace, refGrantResolver, refPath) + valid, cond := validateRouteBackendRef(ref, route.Source.GetNamespace(), refGrantResolver, refPath) if !valid { - backendRef = BackendRef{ - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + Weight: weight, + Valid: false, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } - return backendRef, &cond + return backendRef, []conditions.Condition{cond} } - ns := sourceNamespace + ns := route.Source.GetNamespace() if ref.Namespace != nil { ns = string(*ref.Namespace) } svcNsName := types.NamespacedName{Name: string(ref.Name), Namespace: ns} svcIPFamily, svcPort, err := getIPFamilyAndPortFromRef(ref.BackendRef, svcNsName, services, refPath) if err != nil { - backendRef = BackendRef{ - Weight: weight, - Valid: false, - SvcNsName: svcNsName, - ServicePort: v1.ServicePort{}, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + Weight: weight, + Valid: false, + SvcNsName: svcNsName, + ServicePort: v1.ServicePort{}, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: 
make(map[types.NamespacedName]conditions.Condition), } - cond := staticConds.NewRouteBackendRefRefBackendNotFound(err.Error()) - return backendRef, &cond + return backendRef, []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())} } - if err := verifyIPFamily(npCfg, svcIPFamily); err != nil { - backendRef = BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + var conds []conditions.Condition + invalidForGateways := make(map[types.NamespacedName]conditions.Condition) + for _, parentRef := range route.ParentRefs { + if err := verifyIPFamily(parentRef.Gateway.EffectiveNginxProxy, svcIPFamily); err != nil { + invalidForGateways[parentRef.Gateway.NamespacedName] = staticConds.NewRouteInvalidIPFamily(err.Error()) } - - cond := staticConds.NewRouteInvalidIPFamily(err.Error()) - return backendRef, &cond } backendTLSPolicy, err := findBackendTLSPolicyForService( backendTLSPolicies, ref.Namespace, string(ref.Name), - sourceNamespace, + route.Source.GetNamespace(), ) if err != nil { - backendRef = BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + SvcNsName: svcNsName, + ServicePort: svcPort, + Weight: weight, + Valid: false, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: invalidForGateways, } - cond := staticConds.NewRouteBackendRefUnsupportedValue(err.Error()) - return backendRef, &cond + return backendRef, append(conds, staticConds.NewRouteBackendRefUnsupportedValue(err.Error())) } - backendRef = BackendRef{ - SvcNsName: svcNsName, - BackendTLSPolicy: backendTLSPolicy, - ServicePort: svcPort, - Valid: true, - Weight: weight, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + SvcNsName: svcNsName, + BackendTLSPolicy: backendTLSPolicy, + ServicePort: svcPort, + Valid: true, + Weight: 
weight, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: invalidForGateways, } - return backendRef, nil + return backendRef, conds } // validateBackendTLSPolicyMatchingAllBackends validates that all backends in a rule reference the same @@ -327,30 +321,32 @@ func getIPFamilyAndPortFromRef( return svc.Spec.IPFamilies, svcPort, nil } -func verifyIPFamily(npCfg *NginxProxy, svcIPFamily []v1.IPFamily) error { - if npCfg == nil || npCfg.Source == nil || !npCfg.Valid { +func verifyIPFamily(npCfg *EffectiveNginxProxy, svcIPFamily []v1.IPFamily) error { + if npCfg == nil { return nil } - // we can access this field since we have already validated that ipFamily is not nil in validateNginxProxy. - npIPFamily := npCfg.Source.Spec.IPFamily - if *npIPFamily == ngfAPI.IPv4 { - if slices.Contains(svcIPFamily, v1.IPv6Protocol) { - // capitalizing error message to match the rest of the error messages associated with a condition - //nolint: stylecheck - return errors.New( - "service configured with IPv6 family but NginxProxy is configured with IPv4", - ) - } + containsIPv6 := slices.Contains(svcIPFamily, v1.IPv6Protocol) + containsIPv4 := slices.Contains(svcIPFamily, v1.IPv4Protocol) + + //nolint: stylecheck // used in status condition which is normally capitalized + errIPv6Mismatch := errors.New("service configured with IPv6 family but NginxProxy is configured with IPv4") + //nolint: stylecheck // used in status condition which is normally capitalized + errIPv4Mismatch := errors.New("service configured with IPv4 family but NginxProxy is configured with IPv6") + + npIPFamily := npCfg.IPFamily + + if npIPFamily == nil { + // default is dual so we don't need to check the service IPFamily. 
+ return nil } - if *npIPFamily == ngfAPI.IPv6 { - if slices.Contains(svcIPFamily, v1.IPv4Protocol) { - // capitalizing error message to match the rest of the error messages associated with a condition - //nolint: stylecheck - return errors.New( - "service configured with IPv4 family but NginxProxy is configured with IPv6", - ) - } + + if *npIPFamily == ngfAPIv1alpha2.IPv4 && containsIPv6 { + return errIPv6Mismatch + } + + if *npIPFamily == ngfAPIv1alpha2.IPv6 && containsIPv4 { + return errIPv4Mismatch } return nil diff --git a/internal/mode/static/state/graph/backend_refs_test.go b/internal/mode/static/state/graph/backend_refs_test.go index 0d43456eed..543973758a 100644 --- a/internal/mode/static/state/graph/backend_refs_test.go +++ b/internal/mode/static/state/graph/backend_refs_test.go @@ -14,7 +14,7 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha2" "sigs.k8s.io/gateway-api/apis/v1alpha3" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" @@ -324,55 +324,35 @@ func TestVerifyIPFamily(t *testing.T) { test := []struct { name string expErr error - npCfg *NginxProxy + npCfg *EffectiveNginxProxy svcIPFamily []v1.IPFamily }{ { name: "Valid - IPv6 and IPv4 configured for NGINX, service has only IPv4", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.Dual), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, svcIPFamily: []v1.IPFamily{v1.IPv4Protocol}, }, { name: "Valid - IPv6 and IPv4 configured for NGINX, service has only IPv6", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: 
helpers.GetPointer(ngfAPI.Dual), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, svcIPFamily: []v1.IPFamily{v1.IPv6Protocol}, }, { name: "Invalid - IPv4 configured for NGINX, service has only IPv6", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.IPv4), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv4), }, svcIPFamily: []v1.IPFamily{v1.IPv6Protocol}, expErr: errors.New("service configured with IPv6 family but NginxProxy is configured with IPv4"), }, { name: "Invalid - IPv6 configured for NGINX, service has only IPv4", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.IPv6), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), }, svcIPFamily: []v1.IPFamily{v1.IPv4Protocol}, expErr: errors.New("service configured with IPv4 family but NginxProxy is configured with IPv6"), @@ -397,13 +377,13 @@ func TestVerifyIPFamily(t *testing.T) { } } -func TestAddBackendRefsToRulesTest(t *testing.T) { +func TestAddBackendRefsToRules(t *testing.T) { t.Parallel() sectionNameRefs := []ParentRef{ { Idx: 0, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway"}, + Gateway: &ParentRefGateway{NamespacedName: types.NamespacedName{Namespace: "test", Name: "gateway"}}, Attachment: &ParentRefAttachmentStatus{ Attached: true, }, @@ -609,10 +589,11 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr1", "Service", 1, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ 
-623,16 +604,18 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr2", "Service", 2, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[1], - Valid: true, - Weight: 5, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[1], + Valid: true, + Weight: 5, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ -643,18 +626,20 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr2", "Service", 2, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, - BackendTLSPolicy: btp3, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + BackendTLSPolicy: btp3, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[1], - Valid: true, - Weight: 5, - BackendTLSPolicy: btp3, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[1], + Valid: true, + Weight: 5, + BackendTLSPolicy: btp3, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ -695,7 +680,8 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr3", "NotService", 1, "svc1"), expectedBackendRefs: []BackendRef{ { - Weight: 1, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: []conditions.Condition{ @@ -713,18 +699,20 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { }), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: false, - Weight: 1, - 
BackendTLSPolicy: btp1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: false, + Weight: 1, + BackendTLSPolicy: btp1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc2NsName, - ServicePort: svc2.Spec.Ports[1], - Valid: false, - Weight: 5, - BackendTLSPolicy: btp2, + SvcNsName: svc2NsName, + ServicePort: svc2.Spec.Ports[1], + Valid: false, + Weight: 5, + BackendTLSPolicy: btp2, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: []conditions.Condition{ @@ -752,7 +740,7 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(nil) - addBackendRefsToRules(test.route, resolver, services, test.policies, nil) + addBackendRefsToRules(test.route, resolver, services, test.policies) var actual []BackendRef if test.route.Spec.Rules != nil { @@ -844,11 +832,11 @@ func TestCreateBackend(t *testing.T) { } tests := []struct { - expectedCondition *conditions.Condition - nginxProxy *NginxProxy + nginxProxySpec *EffectiveNginxProxy name string expectedServicePortReference string ref gatewayv1.HTTPBackendRef + expectedConditions []conditions.Condition expectedBackend BackendRef }{ { @@ -856,13 +844,14 @@ func TestCreateBackend(t *testing.T) { BackendRef: getNormalRef(), }, expectedBackend: BackendRef{ - SvcNsName: svc1NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: true, + SvcNsName: svc1NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service1_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal case", }, { @@ -873,13 +862,14 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc1NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 1, - Valid: true, + SvcNsName: svc1NamespacedName, + ServicePort: 
svc1.Spec.Ports[0], + Weight: 1, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service1_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal with nil weight", }, { @@ -890,17 +880,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: types.NamespacedName{}, - ServicePort: v1.ServicePort{}, - Weight: 0, - Valid: false, + SvcNsName: types.NamespacedName{}, + ServicePort: v1.ServicePort{}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + Weight: 0, + Valid: false, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefUnsupportedValue( "test.weight: Invalid value: -1: must be in the range [0, 1000000]", ), - ), + }, name: "invalid weight", }, { @@ -911,17 +902,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: types.NamespacedName{}, - ServicePort: v1.ServicePort{}, - Weight: 5, - Valid: false, + SvcNsName: types.NamespacedName{}, + ServicePort: v1.ServicePort{}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + Weight: 5, + Valid: false, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefInvalidKind( `test.kind: Unsupported value: "NotService": supported values: "Service"`, ), - ), + }, name: "invalid kind", }, { @@ -938,36 +930,33 @@ func TestCreateBackend(t *testing.T) { Namespace: "test", Name: "not-exist", }, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefRefBackendNotFound(`test.name: Not found: "not-exist"`), - ), + }, name: "service doesn't exist", }, { ref: 
gatewayv1.HTTPBackendRef{ - BackendRef: getModifiedRef(func(backend gatewayv1.BackendRef) gatewayv1.BackendRef { - backend.Name = "service2" - return backend - }), + BackendRef: getNormalRef(), }, expectedBackend: BackendRef{ - SvcNsName: svc2NamespacedName, + SvcNsName: svc1NamespacedName, ServicePort: svc1.Spec.Ports[0], Weight: 5, - Valid: false, - }, - nginxProxy: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: staticConds.NewRouteInvalidIPFamily( + `service configured with IPv4 family but NginxProxy is configured with IPv6`, + ), }, - Valid: true, }, - expectedCondition: helpers.GetPointer( - staticConds.NewRouteInvalidIPFamily(`service configured with IPv4 family but NginxProxy is configured with IPv6`), - ), - name: "service IPFamily doesn't match NginxProxy IPFamily", + expectedServicePortReference: "test_service1_80", + nginxProxySpec: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6)}, + expectedConditions: nil, + name: "service IPFamily doesn't match NginxProxy IPFamily", }, { ref: gatewayv1.HTTPBackendRef{ @@ -977,14 +966,15 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc2NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: true, - BackendTLSPolicy: &btp, + SvcNsName: svc2NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: true, + BackendTLSPolicy: &btp, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service2_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal case with policy", }, { @@ -995,17 +985,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc3NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: false, + 
SvcNsName: svc3NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefUnsupportedValue( "the backend TLS policy is invalid: unsupported value", ), - ), + }, name: "invalid policy", }, } @@ -1020,8 +1011,6 @@ func TestCreateBackend(t *testing.T) { client.ObjectKeyFromObject(btp2.Source): &btp2, } - sourceNamespace := "test" - refPath := field.NewPath("test") alwaysTrueRefGrantResolver := func(_ toResource) bool { return true } @@ -1036,18 +1025,36 @@ func TestCreateBackend(t *testing.T) { test.ref.BackendRef, []any{}, } - backend, cond := createBackendRef( + route := &L7Route{ + Source: &gatewayv1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + }, + }, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + EffectiveNginxProxy: test.nginxProxySpec, + }, + }, + }, + } + + backend, conds := createBackendRef( rbr, - sourceNamespace, + route, alwaysTrueRefGrantResolver, services, refPath, policies, - test.nginxProxy, ) g.Expect(helpers.Diff(test.expectedBackend, backend)).To(BeEmpty()) - g.Expect(cond).To(Equal(test.expectedCondition)) + g.Expect(conds).To(Equal(test.expectedConditions)) servicePortRef := backend.ServicePortReference() g.Expect(servicePortRef).To(Equal(test.expectedServicePortReference)) @@ -1062,14 +1069,31 @@ func TestCreateBackend(t *testing.T) { []any{}, } + route := &L7Route{ + Source: &gatewayv1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + }, + }, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, + }, + }, + } + backend, conds := createBackendRef( ref, - "test-ns", + 
route, alwaysTrueRefGrantResolver, services, refPath, policies, - nil, ) g.Expect(conds).To(BeNil()) diff --git a/internal/mode/static/state/graph/backend_tls_policy.go b/internal/mode/static/state/graph/backend_tls_policy.go index 4e00eecc56..67563eefe5 100644 --- a/internal/mode/static/state/graph/backend_tls_policy.go +++ b/internal/mode/static/state/graph/backend_tls_policy.go @@ -10,6 +10,7 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha3" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) @@ -18,8 +19,8 @@ type BackendTLSPolicy struct { Source *v1alpha3.BackendTLSPolicy // CaCertRef is the name of the ConfigMap that contains the CA certificate. CaCertRef types.NamespacedName - // Gateway is the name of the Gateway that is being checked for this BackendTLSPolicy. - Gateway types.NamespacedName + // Gateways are the names of the Gateways that are being checked for this BackendTLSPolicy. + Gateways []types.NamespacedName // Conditions include Conditions for the BackendTLSPolicy. Conditions []conditions.Condition // Valid shows whether the BackendTLSPolicy is valid. 
@@ -35,9 +36,9 @@ func processBackendTLSPolicies( configMapResolver *configMapResolver, secretResolver *secretResolver, ctlrName string, - gateway *Gateway, + gateways map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*BackendTLSPolicy { - if len(backendTLSPolicies) == 0 || gateway == nil { + if len(backendTLSPolicies) == 0 || len(gateways) == 0 { return nil } @@ -57,12 +58,8 @@ func processBackendTLSPolicies( Source: backendTLSPolicy, Valid: valid, Conditions: conds, - Gateway: types.NamespacedName{ - Namespace: gateway.Source.Namespace, - Name: gateway.Source.Name, - }, - CaCertRef: caCertRef, - Ignored: ignored, + CaCertRef: caCertRef, + Ignored: ignored, } } return processedBackendTLSPolicies @@ -134,7 +131,7 @@ func validateBackendTLSCACertRef( secretResolver *secretResolver, ) error { if len(btp.Spec.Validation.CACertificateRefs) != 1 { - path := field.NewPath("tls.cacertrefs") + path := field.NewPath("validation.caCertificateRefs") valErr := field.TooMany(path, len(btp.Spec.Validation.CACertificateRefs), 1) return valErr } @@ -143,13 +140,13 @@ func validateBackendTLSCACertRef( allowedCaCertKinds := []v1.Kind{"ConfigMap", "Secret"} if !slices.Contains(allowedCaCertKinds, selectedCertRef.Kind) { - path := field.NewPath("tls.cacertrefs[0].kind") + path := field.NewPath("validation.caCertificateRefs[0].kind") valErr := field.NotSupported(path, btp.Spec.Validation.CACertificateRefs[0].Kind, allowedCaCertKinds) return valErr } if selectedCertRef.Group != "" && selectedCertRef.Group != "core" { - path := field.NewPath("tls.cacertrefs[0].group") + path := field.NewPath("validation.caCertificateRefs[0].group") valErr := field.NotSupported(path, selectedCertRef.Group, []string{"", "core"}) return valErr } @@ -161,12 +158,12 @@ func validateBackendTLSCACertRef( switch selectedCertRef.Kind { case "ConfigMap": if err := configMapResolver.resolve(nsName); err != nil { - path := field.NewPath("tls.cacertrefs[0]") + path := 
field.NewPath("validation.caCertificateRefs[0]") return field.Invalid(path, selectedCertRef, err.Error()) } case "Secret": if err := secretResolver.resolve(nsName); err != nil { - path := field.NewPath("tls.cacertrefs[0]") + path := field.NewPath("validation.caCertificateRefs[0]") return field.Invalid(path, selectedCertRef, err.Error()) } default: @@ -186,3 +183,32 @@ func validateBackendTLSWellKnownCACerts(btp *v1alpha3.BackendTLSPolicy) error { } return nil } + +func addGatewaysForBackendTLSPolicies( + backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, + services map[types.NamespacedName]*ReferencedService, +) { + for _, backendTLSPolicy := range backendTLSPolicies { + gateways := make(map[types.NamespacedName]struct{}) + + for _, refs := range backendTLSPolicy.Source.Spec.TargetRefs { + if refs.Kind != kinds.Service { + continue + } + + for svcNsName, referencedServices := range services { + if svcNsName.Name != string(refs.Name) { + continue + } + + for gateway := range referencedServices.GatewayNsNames { + gateways[gateway] = struct{}{} + } + } + } + + for gateway := range gateways { + backendTLSPolicy.Gateways = append(backendTLSPolicy.Gateways, gateway) + } + } +} diff --git a/internal/mode/static/state/graph/backend_tls_policy_test.go b/internal/mode/static/state/graph/backend_tls_policy_test.go index cea42d64e9..12f0d7264f 100644 --- a/internal/mode/static/state/graph/backend_tls_policy_test.go +++ b/internal/mode/static/state/graph/backend_tls_policy_test.go @@ -46,27 +46,29 @@ func TestProcessBackendTLSPoliciesEmpty(t *testing.T) { }, } - gateway := &Gateway{ - Source: &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "gateway", Namespace: "test"}}, + gateway := map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway"}: { + Source: &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "gateway", Namespace: "test"}}, + }, } tests := []struct { expected map[types.NamespacedName]*BackendTLSPolicy - gateway *Gateway + 
gateways map[types.NamespacedName]*Gateway backendTLSPolicies map[types.NamespacedName]*v1alpha3.BackendTLSPolicy name string }{ { name: "no policies", expected: nil, - gateway: gateway, + gateways: gateway, backendTLSPolicies: nil, }, { name: "nil gateway", expected: nil, backendTLSPolicies: backendTLSPolicies, - gateway: nil, + gateways: nil, }, } @@ -75,7 +77,7 @@ func TestProcessBackendTLSPoliciesEmpty(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processBackendTLSPolicies(test.backendTLSPolicies, nil, nil, "test", test.gateway) + processed := processBackendTLSPolicies(test.backendTLSPolicies, nil, nil, "test", test.gateways) g.Expect(processed).To(Equal(test.expected)) }) @@ -93,6 +95,15 @@ func TestValidateBackendTLSPolicy(t *testing.T) { }, } + targetRefInvalidKind := []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Invalid", + Name: "service1", + }, + }, + } + localObjectRefNormalCase := []gatewayv1.LocalObjectReference{ { Kind: "ConfigMap", @@ -119,7 +130,7 @@ func TestValidateBackendTLSPolicy(t *testing.T) { localObjectRefInvalidKind := []gatewayv1.LocalObjectReference{ { - Kind: "Secret", + Kind: "Invalid", Name: "secret", Group: "", }, @@ -299,7 +310,7 @@ func TestValidateBackendTLSPolicy(t *testing.T) { Namespace: "test", }, Spec: v1alpha3.BackendTLSPolicySpec{ - TargetRefs: targetRefNormalCase, + TargetRefs: targetRefInvalidKind, Validation: v1alpha3.BackendTLSPolicyValidation{ CACertificateRefs: localObjectRefInvalidKind, Hostname: "foo.test.com", @@ -475,3 +486,182 @@ func TestValidateBackendTLSPolicy(t *testing.T) { }) } } + +func TestAddGatewaysForBackendTLSPolicies(t *testing.T) { + t.Parallel() + + btp1 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp1", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + 
LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service1", + }, + }, + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service2", + }, + }, + }, + }, + }, + } + btp1Expected := btp1 + + btp1Expected.Gateways = []types.NamespacedName{ + {Namespace: "test", Name: "gateway1"}, + {Namespace: "test", Name: "gateway2"}, + {Namespace: "test", Name: "gateway3"}, + } + + btp2 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp2", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service3", + }, + }, + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service4", + }, + }, + }, + }, + }, + } + + btp2Expected := btp2 + btp2Expected.Gateways = []types.NamespacedName{ + {Namespace: "test", Name: "gateway4"}, + } + + btp3 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp3", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service-does-not-exist", + }, + }, + }, + }, + }, + } + + btp4 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp4", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Gateway", + Name: "gateway", + }, + }, + }, + }, + }, + } + + tests := []struct { + backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy + services 
map[types.NamespacedName]*ReferencedService + expected map[types.NamespacedName]*BackendTLSPolicy + name string + }{ + { + name: "add multiple gateways to backend tls policies", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp1"}: btp1, + {Namespace: "test", Name: "btp2"}: btp2, + }, + services: map[types.NamespacedName]*ReferencedService{ + {Namespace: "test", Name: "service1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway1"}: {}, + }, + }, + {Namespace: "test", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway2"}: {}, + {Namespace: "test", Name: "gateway3"}: {}, + }, + }, + {Namespace: "test", Name: "service3"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway4"}: {}, + }, + }, + {Namespace: "test", Name: "service4"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway4"}: {}, + }, + }, + }, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp1"}: btp1Expected, + {Namespace: "test", Name: "btp2"}: btp2Expected, + }, + }, + { + name: "backend tls policy with a service target ref that does not reference a gateway", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp3"}: btp3, + }, + services: map[types.NamespacedName]*ReferencedService{ + {Namespace: "test", Name: "service1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{}, + }, + }, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp3"}: btp3, + }, + }, + { + name: "backend tls policy that does not reference a service", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp4"}: btp4, + }, + services: map[types.NamespacedName]*ReferencedService{}, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + 
{Namespace: "test", Name: "btp4"}: btp4, + }, + }, + } + + for _, test := range tests { + g := NewWithT(t) + t.Run(test.name, func(t *testing.T) { + t.Parallel() + addGatewaysForBackendTLSPolicies(test.backendTLSPolicies, test.services) + g.Expect(helpers.Diff(test.backendTLSPolicies, test.expected)).To(BeEmpty()) + }) + } +} diff --git a/internal/mode/static/state/graph/gateway.go b/internal/mode/static/state/graph/gateway.go index 2ee69b5546..3ff21ce44f 100644 --- a/internal/mode/static/state/graph/gateway.go +++ b/internal/mode/static/state/graph/gateway.go @@ -1,22 +1,31 @@ package graph import ( - "sort" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - ngfsort "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) -// Gateway represents the winning Gateway resource. +// Gateway represents a Gateway resource. type Gateway struct { + // LatestReloadResult is the result of the last nginx reload attempt. + LatestReloadResult NginxReloadResult // Source is the corresponding Gateway resource. Source *v1.Gateway + // NginxProxy is the NginxProxy referenced by this Gateway. + NginxProxy *NginxProxy + // EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on + // the GatewayClass resource. This is the effective set of config that should be applied to the Gateway. + // If non-nil, then this config is valid. 
+ EffectiveNginxProxy *EffectiveNginxProxy + // DeploymentName is the name of the nginx Deployment associated with this Gateway. + DeploymentName types.NamespacedName // Listeners include the listeners of the Gateway. Listeners []*Listener // Conditions holds the conditions for the Gateway. @@ -27,100 +36,141 @@ type Gateway struct { Valid bool } -// processedGateways holds the resources that belong to NGF. -type processedGateways struct { - Winner *v1.Gateway - Ignored map[types.NamespacedName]*v1.Gateway -} +// processGateways determines which Gateway resources belong to NGF (determined by the Gateway GatewayClassName field). +func processGateways( + gws map[types.NamespacedName]*v1.Gateway, + gcName string, +) map[types.NamespacedName]*v1.Gateway { + referencedGws := make(map[types.NamespacedName]*v1.Gateway) + + for gwNsName, gw := range gws { + if string(gw.Spec.GatewayClassName) != gcName { + continue + } -// GetAllNsNames returns all the NamespacedNames of the Gateway resources that belong to NGF. 
-func (gws processedGateways) GetAllNsNames() []types.NamespacedName { - winnerCnt := 0 - if gws.Winner != nil { - winnerCnt = 1 + referencedGws[gwNsName] = gw } - length := winnerCnt + len(gws.Ignored) - if length == 0 { + if len(referencedGws) == 0 { return nil } - allNsNames := make([]types.NamespacedName, 0, length) + return referencedGws +} - if gws.Winner != nil { - allNsNames = append(allNsNames, client.ObjectKeyFromObject(gws.Winner)) - } - for nsName := range gws.Ignored { - allNsNames = append(allNsNames, nsName) +func buildGateways( + gws map[types.NamespacedName]*v1.Gateway, + secretResolver *secretResolver, + gc *GatewayClass, + refGrantResolver *referenceGrantResolver, + nps map[types.NamespacedName]*NginxProxy, +) map[types.NamespacedName]*Gateway { + if len(gws) == 0 { + return nil } - return allNsNames -} + builtGateways := make(map[types.NamespacedName]*Gateway, len(gws)) -// processGateways determines which Gateway resource belong to NGF (determined by the Gateway GatewayClassName field). 
-func processGateways( - gws map[types.NamespacedName]*v1.Gateway, - gcName string, -) processedGateways { - referencedGws := make([]*v1.Gateway, 0, len(gws)) + for gwNsName, gw := range gws { + var np *NginxProxy + var npNsName types.NamespacedName + if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { + npNsName = types.NamespacedName{Namespace: gw.Namespace, Name: gw.Spec.Infrastructure.ParametersRef.Name} + np = nps[npNsName] + } - for _, gw := range gws { - if string(gw.Spec.GatewayClassName) != gcName { - continue + var gcNp *NginxProxy + if gc != nil { + gcNp = gc.NginxProxy } - referencedGws = append(referencedGws, gw) - } + effectiveNginxProxy := buildEffectiveNginxProxy(gcNp, np) - if len(referencedGws) == 0 { - return processedGateways{} - } + conds, valid := validateGateway(gw, gc, np) - sort.Slice(referencedGws, func(i, j int) bool { - return ngfsort.LessClientObject(referencedGws[i], referencedGws[j]) - }) + protectedPorts := make(ProtectedPorts) + if port, enabled := MetricsEnabledForNginxProxy(effectiveNginxProxy); enabled { + metricsPort := config.DefaultNginxMetricsPort + if port != nil { + metricsPort = *port + } + protectedPorts[metricsPort] = "MetricsPort" + } - ignoredGws := make(map[types.NamespacedName]*v1.Gateway) + deploymentName := types.NamespacedName{ + Namespace: gw.GetNamespace(), + Name: controller.CreateNginxResourceName(gw.GetName(), string(gw.Spec.GatewayClassName)), + } - for _, gw := range referencedGws[1:] { - ignoredGws[client.ObjectKeyFromObject(gw)] = gw + if !valid { + builtGateways[gwNsName] = &Gateway{ + Source: gw, + Valid: false, + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Conditions: conds, + DeploymentName: deploymentName, + } + } else { + builtGateways[gwNsName] = &Gateway{ + Source: gw, + Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Valid: true, + Conditions: conds, + 
DeploymentName: deploymentName, + } + } } - return processedGateways{ - Winner: referencedGws[0], - Ignored: ignoredGws, - } + return builtGateways } -func buildGateway( - gw *v1.Gateway, - secretResolver *secretResolver, - gc *GatewayClass, - refGrantResolver *referenceGrantResolver, - protectedPorts ProtectedPorts, -) *Gateway { - if gw == nil { - return nil +func validateGatewayParametersRef(npCfg *NginxProxy, ref v1.LocalParametersReference) []conditions.Condition { + var conds []conditions.Condition + + path := field.NewPath("spec.infrastructure.parametersRef") + + if _, ok := supportedParamKinds[string(ref.Kind)]; !ok { + err := field.NotSupported(path.Child("kind"), string(ref.Kind), []string{kinds.NginxProxy}) + conds = append( + conds, + staticConds.NewGatewayRefInvalid(err.Error()), + staticConds.NewGatewayInvalidParameters(err.Error()), + ) + + return conds } - conds := validateGateway(gw, gc) + if npCfg == nil { + conds = append( + conds, + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters( + field.NotFound(path.Child("name"), ref.Name).Error(), + ), + ) - if len(conds) > 0 { - return &Gateway{ - Source: gw, - Valid: false, - Conditions: conds, - } + return conds } - return &Gateway{ - Source: gw, - Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), - Valid: true, + if !npCfg.Valid { + msg := npCfg.ErrMsgs.ToAggregate().Error() + conds = append( + conds, + staticConds.NewGatewayRefInvalid(msg), + staticConds.NewGatewayInvalidParameters(msg), + ) + + return conds } + + conds = append(conds, staticConds.NewGatewayResolvedRefs()) + return conds } -func validateGateway(gw *v1.Gateway, gc *GatewayClass) []conditions.Condition { +func validateGateway(gw *v1.Gateway, gc *GatewayClass, npCfg *NginxProxy) ([]conditions.Condition, bool) { var conds []conditions.Condition if gc == nil { @@ -136,5 +186,14 @@ func validateGateway(gw *v1.Gateway, gc *GatewayClass) []conditions.Condition { conds = 
append(conds, staticConds.NewGatewayUnsupportedValue(valErr.Error())...) } - return conds + // we evaluate validity before validating parametersRef because an invalid parametersRef/NginxProxy does not + // invalidate the entire Gateway. + valid := len(conds) == 0 + + if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { + paramConds := validateGatewayParametersRef(npCfg, *gw.Spec.Infrastructure.ParametersRef) + conds = append(conds, paramConds...) + } + + return conds, valid } diff --git a/internal/mode/static/state/graph/gateway_listener.go b/internal/mode/static/state/graph/gateway_listener.go index aa5b7062e1..5bf46bd502 100644 --- a/internal/mode/static/state/graph/gateway_listener.go +++ b/internal/mode/static/state/graph/gateway_listener.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" @@ -21,6 +22,8 @@ import ( // For now, we only support HTTP and HTTPS listeners. type Listener struct { Name string + // GatewayName is the name of the Gateway resource this Listener belongs to. + GatewayName types.NamespacedName // Source holds the source of the Listener from the Gateway resource. Source v1.Listener // Routes holds the GRPC/HTTPRoutes attached to the Listener. 
@@ -57,7 +60,7 @@ func buildListeners( for _, gl := range gw.Spec.Listeners { configurator := listenerFactory.getConfiguratorForListener(gl) - listeners = append(listeners, configurator.configure(gl)) + listeners = append(listeners, configurator.configure(gl, client.ObjectKeyFromObject(gw))) } return listeners @@ -167,7 +170,7 @@ type listenerConfigurator struct { externalReferenceResolvers []listenerExternalReferenceResolver } -func (c *listenerConfigurator) configure(listener v1.Listener) *Listener { +func (c *listenerConfigurator) configure(listener v1.Listener, gwNSName types.NamespacedName) *Listener { var conds []conditions.Condition attachable := true @@ -197,6 +200,7 @@ func (c *listenerConfigurator) configure(listener v1.Listener) *Listener { l := &Listener{ Name: string(listener.Name), + GatewayName: gwNSName, Source: listener, Conditions: conds, AllowedRouteLabelSelector: allowedRouteSelector, diff --git a/internal/mode/static/state/graph/gateway_test.go b/internal/mode/static/state/graph/gateway_test.go index 7718a084c5..63bd96e753 100644 --- a/internal/mode/static/state/graph/gateway_test.go +++ b/internal/mode/static/state/graph/gateway_test.go @@ -8,70 +8,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + "sigs.k8s.io/gateway-api/apis/v1beta1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) -func 
TestProcessedGatewaysGetAllNsNames(t *testing.T) { - t.Parallel() - winner := &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "gateway-1", - }, - } - loser := &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "gateway-2", - }, - } - - tests := []struct { - gws processedGateways - name string - expected []types.NamespacedName - }{ - { - gws: processedGateways{}, - expected: nil, - name: "no gateways", - }, - { - gws: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{ - client.ObjectKeyFromObject(loser): loser, - }, - }, - expected: []types.NamespacedName{ - client.ObjectKeyFromObject(winner), - client.ObjectKeyFromObject(loser), - }, - name: "winner and ignored", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - result := test.gws.GetAllNsNames() - g.Expect(result).To(Equal(test.expected)) - }) - } -} - func TestProcessGateways(t *testing.T) { t.Parallel() const gcName = "test-gc" - winner := &v1.Gateway{ + gw1 := &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "gateway-1", @@ -80,7 +34,7 @@ func TestProcessGateways(t *testing.T) { GatewayClassName: gcName, }, } - loser := &v1.Gateway{ + gw2 := &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "gateway-2", @@ -92,12 +46,12 @@ func TestProcessGateways(t *testing.T) { tests := []struct { gws map[types.NamespacedName]*v1.Gateway - expected processedGateways + expected map[types.NamespacedName]*v1.Gateway name string }{ { gws: nil, - expected: processedGateways{}, + expected: nil, name: "no gateways", }, { @@ -106,29 +60,26 @@ func TestProcessGateways(t *testing.T) { Spec: v1.GatewaySpec{GatewayClassName: "some-class"}, }, }, - expected: processedGateways{}, + expected: nil, name: "unrelated gateway", }, { gws: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-1"}: winner, + {Namespace: "test", Name: 
"gateway-1"}: gw1, }, - expected: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{}, + expected: map[types.NamespacedName]*v1.Gateway{ + {Namespace: "test", Name: "gateway-1"}: gw1, }, name: "one gateway", }, { gws: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-1"}: winner, - {Namespace: "test", Name: "gateway-2"}: loser, + {Namespace: "test", Name: "gateway-1"}: gw1, + {Namespace: "test", Name: "gateway-2"}: gw2, }, - expected: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: loser, - }, + expected: map[types.NamespacedName]*v1.Gateway{ + {Namespace: "test", Name: "gateway-1"}: gw1, + {Namespace: "test", Name: "gateway-2"}: gw2, }, name: "multiple gateways", }, @@ -150,9 +101,6 @@ func TestBuildGateway(t *testing.T) { labelSet := map[string]string{ "key": "value", } - protectedPorts := ProtectedPorts{ - 9113: "MetricsPort", - } listenerAllowedRoutes := v1.Listener{ Name: "listener-with-allowed-routes", Hostname: helpers.GetPointer[v1.Hostname]("foo.example.com"), @@ -338,14 +286,18 @@ func TestBuildGateway(t *testing.T) { ) type gatewayCfg struct { + name string + ref *v1.LocalParametersReference listeners []v1.Listener addresses []v1.GatewaySpecAddress } var lastCreatedGateway *v1.Gateway - createGateway := func(cfg gatewayCfg) *v1.Gateway { + createGateway := func(cfg gatewayCfg) map[types.NamespacedName]*v1.Gateway { + gatewayMap := make(map[types.NamespacedName]*v1.Gateway) lastCreatedGateway = &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ + Name: cfg.name, Namespace: "test", }, Spec: v1.GatewaySpec{ @@ -354,12 +306,74 @@ func TestBuildGateway(t *testing.T) { Addresses: cfg.addresses, }, } - return lastCreatedGateway + + if cfg.ref != nil { + lastCreatedGateway.Spec.Infrastructure = &v1.GatewayInfrastructure{ + ParametersRef: cfg.ref, + } + } + + gatewayMap[types.NamespacedName{ + Namespace: 
lastCreatedGateway.Namespace, + Name: lastCreatedGateway.Name, + }] = lastCreatedGateway + return gatewayMap } + getLastCreatedGateway := func() *v1.Gateway { return lastCreatedGateway } + validGwNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "valid-gw-np", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError)}, + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, + }, + } + validGwNpRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: validGwNp.Name, + } + invalidGwNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "invalid-gw-np", + }, + } + invalidGwNpRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: invalidGwNp.Name, + } + invalidKindRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: "Invalid", + Name: "invalid-kind", + } + npDoesNotExistRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "does-not-exist", + } + + validGcNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "valid-gc-np", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + }, + } + validGC := &GatewayClass{ Valid: true, } @@ -367,105 +381,136 @@ func TestBuildGateway(t *testing.T) { Valid: false, } + validGCWithNp := &GatewayClass{ + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGcNp, + Valid: true, + }, + } + supportedKindsForListeners := []v1.RouteGroupKind{ {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, } tests := []struct { - gateway 
*v1.Gateway + gateway map[types.NamespacedName]*v1.Gateway gatewayClass *GatewayClass refGrants map[types.NamespacedName]*v1beta1.ReferenceGrant - expected *Gateway + expected map[types.NamespacedName]*Gateway name string }{ { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1, foo8080Listener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, foo8080Listener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8080", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8080Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "foo-8080", - Source: foo8080Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "valid http listeners", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443HTTPSListener1, foo8443HTTPSListener}}, + gatewayCfg{name: 
"gateway-https", listeners: []v1.Listener{foo443HTTPSListener1, foo8443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway-https"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "foo-8443-https", - Source: foo8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-https", gcName), }, + Valid: true, }, - Valid: true, }, name: "valid https listeners", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{listenerAllowedRoutes}}), + gateway: 
createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{listenerAllowedRoutes}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-with-allowed-routes", - Source: listenerAllowedRoutes, - Valid: true, - Attachable: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(labels.Set(labelSet)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-with-allowed-routes", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: listenerAllowedRoutes, + Valid: true, + Attachable: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(labels.Set(labelSet)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "valid http listener with allowed routes label selector", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{crossNamespaceSecretListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{crossNamespaceSecretListener}}), gatewayClass: validGC, refGrants: map[types.NamespacedName]*v1beta1.ReferenceGrant{ {Name: "ref-grant", Namespace: "diff-ns"}: { @@ -491,97 +536,247 @@ func TestBuildGateway(t *testing.T) { }, }, }, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-cross-ns-secret", - Source: crossNamespaceSecretListener, - Valid: 
true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretDiffNamespace)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-cross-ns-secret", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: crossNamespaceSecretListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretDiffNamespace)), + SupportedKinds: supportedKindsForListeners, + }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "valid https listener with cross-namespace secret; allowed by reference grant", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{crossNamespaceSecretListener}}), + gateway: createGateway(gatewayCfg{ + name: "gateway-valid-np", + listeners: []v1.Listener{foo80Listener1}, + ref: validGwNpRef, + }), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-cross-ns-secret", - Source: crossNamespaceSecretListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerRefNotPermitted( - `Certificate ref to secret diff-ns/secret not permitted by any ReferenceGrant`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: validGwNp.Namespace, Name: "gateway-valid-np"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: 
client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-valid-np", gcName), + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + }, + }, + name: "valid http listener with valid NginxProxy; GatewayClass has no NginxProxy", + }, + { + gateway: createGateway(gatewayCfg{ + name: "gateway-valid-np", + listeners: []v1.Listener{foo80Listener1}, + ref: validGwNpRef, + }), + gatewayClass: validGCWithNp, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: validGwNp.Namespace, Name: "gateway-valid-np"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-valid-np", gcName), + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + Metrics: 
&ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + }, + }, + name: "valid http listener with valid NginxProxy; GatewayClass has valid NginxProxy too", + }, + { + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1}}), + gatewayClass: validGCWithNp, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, }, - Valid: true, + }, + name: "valid http listener; GatewayClass has valid NginxProxy", + }, + { + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{crossNamespaceSecretListener}}), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-cross-ns-secret", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: crossNamespaceSecretListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerRefNotPermitted( + `Certificate ref to secret diff-ns/secret not permitted by any ReferenceGrant`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + 
Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, + }, }, name: "invalid attachable https listener with cross-namespace secret; no reference grant", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{listenerInvalidSelector}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{listenerInvalidSelector}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-with-invalid-selector", - Source: listenerInvalidSelector, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `invalid label selector: "invalid" is not a valid label selector operator`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-with-invalid-selector", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: listenerInvalidSelector, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `invalid label selector: "invalid" is not a valid label selector operator`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "attachable http listener with invalid label selector", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{invalidProtocolListener}}), + gateway: createGateway(gatewayCfg{name: 
"gateway1", listeners: []v1.Listener{invalidProtocolListener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-protocol", - Source: invalidProtocolListener, - Valid: false, - Attachable: false, - Conditions: staticConds.NewListenerUnsupportedProtocol( - `protocol: Unsupported value: "TCP": supported values: "HTTP", "HTTPS", "TLS"`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-protocol", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidProtocolListener, + Valid: false, + Attachable: false, + Conditions: staticConds.NewListenerUnsupportedProtocol( + `protocol: Unsupported value: "TCP": supported values: "HTTP", "HTTPS", "TLS"`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "invalid listener protocol", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ invalidPortListener, invalidHTTPSPortListener, @@ -590,107 +785,132 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-port", - Source: invalidPortListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 0: port must be between 1-65535`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "invalid-https-port", - Source: invalidHTTPSPortListener, - Valid: false, - Attachable: true, - 
Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 65536: port must be between 1-65535`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 0: port must be between 1-65535`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-https-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHTTPSPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 65536: port must be between 1-65535`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-protected-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidProtectedPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 9113: port is already in use as MetricsPort`, + ), + SupportedKinds: supportedKindsForListeners, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + }, }, - { - Name: "invalid-protected-port", - Source: invalidProtectedPortListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 9113: port is already in use as MetricsPort`, - ), - SupportedKinds: supportedKindsForListeners, - Routes: map[RouteKey]*L7Route{}, - L4Routes: 
map[L4RouteKey]*L4Route{}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid ports", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{invalidHostnameListener, invalidHTTPSHostnameListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{invalidHostnameListener, invalidHTTPSHostnameListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-hostname", - Source: invalidHostnameListener, - Valid: false, - Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-hostname", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHostnameListener, + Valid: false, + Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-https-hostname", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHTTPSHostnameListener, + Valid: false, + Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "invalid-https-hostname", - Source: invalidHTTPSHostnameListener, - Valid: false, - Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + 
DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid hostnames", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{invalidTLSConfigListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{invalidTLSConfigListener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-tls-config", - Source: invalidTLSConfigListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerInvalidCertificateRef( - `tls.certificateRefs[0]: Invalid value: test/does-not-exist: secret does not exist`, - ), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-tls-config", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidTLSConfigListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerInvalidCertificateRef( + `tls.certificateRefs[0]: Invalid value: test/does-not-exist: secret does not exist`, + ), + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid https listener (secret does not exist)", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ foo80Listener1, foo8080Listener, @@ -704,93 +924,108 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { 
- Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8080", - Source: foo8080Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8081", - Source: foo8081Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8443-https", - Source: foo8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-80", - Source: bar80Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: 
foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8080", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8080Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8081", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8081Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-80", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar80Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + 
ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "bar-8443-https", - Source: bar8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "multiple valid http/https listeners", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ foo80Listener1, bar80Listener, @@ -802,91 +1037,110 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-80", - Source: bar80Listener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-http", - Source: foo443HTTPListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - 
L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-80-https", - Source: foo80HTTPSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-80", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar80Listener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-http", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, 
+ Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-80-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80HTTPSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, 
name: "port/protocol collisions", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{foo80Listener1, foo443HTTPSListener1}, addresses: []v1.GatewaySpecAddress{{}}, }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - Conditions: staticConds.NewGatewayUnsupportedValue("spec." + - "addresses: Forbidden: addresses are not supported", - ), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayUnsupportedValue("spec." + + "addresses: Forbidden: addresses are not supported", + ), + }, }, name: "gateway addresses are not supported", }, @@ -897,58 +1151,78 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, ), gatewayClass: invalidGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - Conditions: staticConds.NewGatewayInvalid("GatewayClass is invalid"), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayInvalid("GatewayClass is invalid"), + }, }, name: "invalid gatewayclass", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, ), gatewayClass: nil, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: 
false, - Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + }, }, name: "nil gatewayclass", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, foo443HTTPListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, foo443HTTPListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: foo443TLSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "foo-443-http", - Source: foo443HTTPListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: 
staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "foo-443-http", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: supportedKindsForListeners, + }, }, }, }, @@ -956,35 +1230,43 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, splat443HTTPSListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, splat443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: foo443TLSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "splat-443-https", - Source: splat443HTTPSListener, - Valid: false, - Attachable: true, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + 
Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "splat-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: splat443HTTPSListener, + Valid: false, + Attachable: true, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), + SupportedKinds: supportedKindsForListeners, + }, }, }, }, @@ -992,38 +1274,204 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, bar443HTTPSListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, bar443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: foo443TLSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: true, - Attachable: true, - ResolvedSecret: 
helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: true, + Attachable: true, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, }, }, name: "https listener and tls listener with non overlapping hostnames", }, + { + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: invalidKindRef, + }, + ), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. 
+ Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), + }, + }, + }, + name: "invalid parameters ref kind", + }, + { + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: npDoesNotExistRef, + }, + ), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. 
+ Conditions: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.name: Not found: \"does-not-exist\"", + ), + }, + }, + }, + name: "referenced NginxProxy doesn't exist", + }, + { + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: invalidGwNpRef, + }, + ), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid NginxProxy does not invalidate Gateway. 
+ NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + }, + }, + }, + name: "invalid NginxProxy", + }, + { + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}, ref: invalidGwNpRef, + }, + ), + gatewayClass: invalidGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: append( + staticConds.NewGatewayInvalid("GatewayClass is invalid"), + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + ), + }, + }, + name: "invalid gatewayclass and invalid NginxProxy", + }, } secretResolver := newSecretResolver( @@ -1032,12 +1480,106 @@ func TestBuildGateway(t *testing.T) { client.ObjectKeyFromObject(secretDiffNamespace): secretDiffNamespace, }) + nginxProxies := map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(validGwNp): {Valid: true, Source: validGwNp}, + client.ObjectKeyFromObject(validGcNp): {Valid: true, Source: validGcNp}, + client.ObjectKeyFromObject(invalidGwNp): { + Source: invalidGwNp, + ErrMsgs: append(field.ErrorList{}, field.Required(field.NewPath("somePath"), "someField")), + Valid: false, + }, + } + for _, test := range tests { t.Run(test.name, func(t 
*testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(test.refGrants) - result := buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, protectedPorts) + result := buildGateways(test.gateway, secretResolver, test.gatewayClass, resolver, nginxProxies) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) }) } } + +func TestValidateGatewayParametersRef(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + np *NginxProxy + ref v1.LocalParametersReference + expConds []conditions.Condition + }{ + { + name: "unsupported parameter ref kind", + ref: v1.LocalParametersReference{ + Kind: "wrong-kind", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefInvalid( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"wrong-kind\": " + + "supported values: \"NginxProxy\"", + ), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"wrong-kind\": " + + "supported values: \"NginxProxy\"", + ), + }, + }, + { + name: "nil nginx proxy", + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters("spec.infrastructure.parametersRef.name: Not found: \"np\""), + }, + }, + { + name: "invalid nginx proxy", + np: &NginxProxy{ + Source: &ngfAPIv1alpha2.NginxProxy{}, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + }, + }, + { + name: "valid", + np: &NginxProxy{ + Source: &ngfAPIv1alpha2.NginxProxy{}, + Valid: 
true, + }, + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayResolvedRefs(), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conds := validateGatewayParametersRef(test.np, test.ref) + g.Expect(conds).To(BeEquivalentTo(test.expConds)) + }) + } +} diff --git a/internal/mode/static/state/graph/gatewayclass.go b/internal/mode/static/state/graph/gatewayclass.go index 510fd63f32..db5e3dff77 100644 --- a/internal/mode/static/state/graph/gatewayclass.go +++ b/internal/mode/static/state/graph/gatewayclass.go @@ -1,8 +1,6 @@ package graph import ( - "errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" @@ -19,6 +17,8 @@ import ( type GatewayClass struct { // Source is the source resource. Source *v1.GatewayClass + // NginxProxy is the NginxProxy resource referenced by this GatewayClass. + NginxProxy *NginxProxy // Conditions include Conditions for the GatewayClass. Conditions []conditions.Condition // Valid shows whether the GatewayClass is valid. @@ -34,7 +34,7 @@ type processedGatewayClasses struct { // processGatewayClasses returns the "Winner" GatewayClass, which is defined in // the command-line argument and references this controller, and a list of "Ignored" GatewayClasses // that reference this controller, but are not named in the command-line argument. -// Also returns a boolean that says whether or not the GatewayClass defined +// Also returns a boolean that says whether the GatewayClass defined // in the command-line argument exists, regardless of which controller it references. 
func processGatewayClasses( gcs map[types.NamespacedName]*v1.GatewayClass, @@ -63,22 +63,66 @@ func processGatewayClasses( func buildGatewayClass( gc *v1.GatewayClass, - npCfg *NginxProxy, + nps map[types.NamespacedName]*NginxProxy, crdVersions map[types.NamespacedName]*metav1.PartialObjectMetadata, ) *GatewayClass { if gc == nil { return nil } - conds, valid := validateGatewayClass(gc, npCfg, crdVersions) + var np *NginxProxy + if gc.Spec.ParametersRef != nil { + np = getNginxProxyForGatewayClass(*gc.Spec.ParametersRef, nps) + } + + conds, valid := validateGatewayClass(gc, np, crdVersions) return &GatewayClass{ Source: gc, + NginxProxy: np, Valid: valid, Conditions: conds, } } +func getNginxProxyForGatewayClass( + ref v1.ParametersReference, + nps map[types.NamespacedName]*NginxProxy, +) *NginxProxy { + if ref.Namespace == nil { + return nil + } + + npName := types.NamespacedName{Name: ref.Name, Namespace: string(*ref.Namespace)} + + return nps[npName] +} + +func validateGatewayClassParametersRef(path *field.Path, ref v1.ParametersReference) []conditions.Condition { + var errs field.ErrorList + + if _, ok := supportedParamKinds[string(ref.Kind)]; !ok { + errs = append( + errs, + field.NotSupported(path.Child("kind"), string(ref.Kind), []string{kinds.NginxProxy}), + ) + } + + if ref.Namespace == nil { + errs = append(errs, field.Required(path.Child("namespace"), "ParametersRef must specify Namespace")) + } + + if len(errs) > 0 { + msg := errs.ToAggregate().Error() + return []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid(msg), + staticConds.NewGatewayClassInvalidParameters(msg), + } + } + + return nil +} + func validateGatewayClass( gc *v1.GatewayClass, npCfg *NginxProxy, @@ -86,28 +130,44 @@ func validateGatewayClass( ) ([]conditions.Condition, bool) { var conds []conditions.Condition - if gc.Spec.ParametersRef != nil { - var err error - path := field.NewPath("spec").Child("parametersRef") - if _, ok := 
supportedParamKinds[string(gc.Spec.ParametersRef.Kind)]; !ok { - err = field.NotSupported(path.Child("kind"), string(gc.Spec.ParametersRef.Kind), []string{kinds.NginxProxy}) - } else if npCfg == nil { - err = field.NotFound(path.Child("name"), gc.Spec.ParametersRef.Name) - conds = append(conds, staticConds.NewGatewayClassRefNotFound()) - } else if !npCfg.Valid { - err = errors.New(npCfg.ErrMsgs.ToAggregate().Error()) - } + supportedVersionConds, versionsValid := gatewayclass.ValidateCRDVersions(crdVersions) + conds = append(conds, supportedVersionConds...) - if err != nil { - conds = append(conds, staticConds.NewGatewayClassInvalidParameters(err.Error())) - } else { - conds = append(conds, staticConds.NewGatewayClassResolvedRefs()) - } + if gc.Spec.ParametersRef == nil { + return conds, versionsValid } - supportedVersionConds, versionsValid := gatewayclass.ValidateCRDVersions(crdVersions) + path := field.NewPath("spec").Child("parametersRef") + refConds := validateGatewayClassParametersRef(path, *gc.Spec.ParametersRef) + + // return early since parametersRef isn't valid + if len(refConds) > 0 { + conds = append(conds, refConds...) 
+ return conds, versionsValid + } + + if npCfg == nil { + conds = append( + conds, + staticConds.NewGatewayClassRefNotFound(), + staticConds.NewGatewayClassInvalidParameters( + field.NotFound(path.Child("name"), gc.Spec.ParametersRef.Name).Error(), + ), + ) + return conds, versionsValid + } + + if !npCfg.Valid { + msg := npCfg.ErrMsgs.ToAggregate().Error() + conds = append( + conds, + staticConds.NewGatewayClassRefInvalid(msg), + staticConds.NewGatewayClassInvalidParameters(msg), + ) + return conds, versionsValid + } - return append(conds, supportedVersionConds...), versionsValid + return append(conds, staticConds.NewGatewayClassResolvedRefs()), versionsValid } var supportedParamKinds = map[string]struct{}{ diff --git a/internal/mode/static/state/graph/gatewayclass_test.go b/internal/mode/static/state/graph/gatewayclass_test.go index 1f1454e838..3d26b6b1fd 100644 --- a/internal/mode/static/state/graph/gatewayclass_test.go +++ b/internal/mode/static/state/graph/gatewayclass_test.go @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" @@ -127,17 +127,32 @@ func TestProcessGatewayClasses(t *testing.T) { func TestBuildGatewayClass(t *testing.T) { t.Parallel() validGC := &v1.GatewayClass{} + npNsName := types.NamespacedName{Namespace: "test", Name: "nginx-proxy"} + + np := &ngfAPIv1alpha2.NginxProxy{ + TypeMeta: metav1.TypeMeta{ + Kind: kinds.NginxProxy, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("my-svc"), + }, + }, + } gcWithParams := &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: 
&v1.ParametersReference{ Kind: v1.Kind(kinds.NginxProxy), - Namespace: helpers.GetPointer(v1.Namespace("test")), - Name: "nginx-proxy", + Namespace: helpers.GetPointer(v1.Namespace(npNsName.Namespace)), + Name: npNsName.Name, }, }, } + gcWithParamsNoNamespace := gcWithParams.DeepCopy() + gcWithParamsNoNamespace.Spec.ParametersRef.Namespace = nil + gcWithInvalidKind := &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ @@ -168,12 +183,11 @@ func TestBuildGatewayClass(t *testing.T) { } tests := []struct { - gc *v1.GatewayClass - np *NginxProxy - crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata - expected *GatewayClass - name string - expNPInvalid bool + gc *v1.GatewayClass + nps map[types.NamespacedName]*NginxProxy + crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata + expected *GatewayClass + name string }{ { gc: validGC, @@ -191,46 +205,54 @@ func TestBuildGatewayClass(t *testing.T) { }, { gc: gcWithParams, - np: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - TypeMeta: metav1.TypeMeta{ - Kind: kinds.NginxProxy, - }, - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - ServiceName: helpers.GetPointer("my-svc"), - }, - }, + nps: map[types.NamespacedName]*NginxProxy{ + npNsName: { + Source: np, + Valid: true, }, - Valid: true, }, expected: &GatewayClass{ Source: gcWithParams, Valid: true, Conditions: []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + NginxProxy: &NginxProxy{ + Valid: true, + Source: np, + }, }, name: "valid gatewayclass with paramsRef", }, { - gc: gcWithInvalidKind, - np: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - TypeMeta: metav1.TypeMeta{ - Kind: kinds.NginxProxy, - }, + gc: gcWithParamsNoNamespace, + expected: &GatewayClass{ + Source: gcWithParamsNoNamespace, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "spec.parametersRef.namespace: Required value: ParametersRef must specify Namespace", + ), + 
staticConds.NewGatewayClassInvalidParameters( + "spec.parametersRef.namespace: Required value: ParametersRef must specify Namespace", + ), }, - Valid: true, }, + name: "valid gatewayclass with paramsRef missing namespace", + }, + { + gc: gcWithInvalidKind, expected: &GatewayClass{ Source: gcWithInvalidKind, Valid: true, Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "spec.parametersRef.kind: Unsupported value: \"Invalid\": supported values: \"NginxProxy\"", + ), staticConds.NewGatewayClassInvalidParameters( "spec.parametersRef.kind: Unsupported value: \"Invalid\": supported values: \"NginxProxy\"", ), }, }, - name: "invalid gatewayclass with unsupported paramsRef Kind", + name: "valid gatewayclass with unsupported paramsRef Kind", }, { gc: gcWithParams, @@ -244,38 +266,57 @@ func TestBuildGatewayClass(t *testing.T) { ), }, }, - expNPInvalid: true, - name: "invalid gatewayclass with paramsRef resource that doesn't exist", + name: "valid gatewayclass with paramsRef resource that doesn't exist", }, { gc: gcWithParams, - np: &NginxProxy{ - Valid: false, - ErrMsgs: field.ErrorList{ - field.Invalid( - field.NewPath("spec", "telemetry", "serviceName"), - "my-svc", - "error", - ), - field.Invalid( - field.NewPath("spec", "telemetry", "exporter", "endpoint"), - "my-endpoint", - "error", - ), + nps: map[types.NamespacedName]*NginxProxy{ + npNsName: { + Valid: false, + ErrMsgs: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "telemetry", "serviceName"), + "my-svc", + "error", + ), + field.Invalid( + field.NewPath("spec", "telemetry", "exporter", "endpoint"), + "my-endpoint", + "error", + ), + }, }, }, expected: &GatewayClass{ Source: gcWithParams, Valid: true, Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "[spec.telemetry.serviceName: Invalid value: \"my-svc\": error" + + ", spec.telemetry.exporter.endpoint: Invalid value: \"my-endpoint\": error]", + ), staticConds.NewGatewayClassInvalidParameters( 
"[spec.telemetry.serviceName: Invalid value: \"my-svc\": error" + ", spec.telemetry.exporter.endpoint: Invalid value: \"my-endpoint\": error]", ), }, + NginxProxy: &NginxProxy{ + Valid: false, + ErrMsgs: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "telemetry", "serviceName"), + "my-svc", + "error", + ), + field.Invalid( + field.NewPath("spec", "telemetry", "exporter", "endpoint"), + "my-endpoint", + "error", + ), + }, + }, }, - expNPInvalid: true, - name: "invalid gatewayclass with invalid paramsRef resource", + name: "valid gatewayclass with invalid paramsRef resource", }, { gc: validGC, @@ -294,11 +335,8 @@ func TestBuildGatewayClass(t *testing.T) { t.Parallel() g := NewWithT(t) - result := buildGatewayClass(test.gc, test.np, test.crdMetadata) + result := buildGatewayClass(test.gc, test.nps, test.crdMetadata) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) - if test.np != nil { - g.Expect(test.np.Valid).ToNot(Equal(test.expNPInvalid)) - } }) } } diff --git a/internal/mode/static/state/graph/graph.go b/internal/mode/static/state/graph/graph.go index 834eff4de0..61d39fb44c 100644 --- a/internal/mode/static/state/graph/graph.go +++ b/internal/mode/static/state/graph/graph.go @@ -8,13 +8,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" "sigs.k8s.io/gateway-api/apis/v1alpha3" "sigs.k8s.io/gateway-api/apis/v1beta1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" @@ 
-35,26 +35,22 @@ type ClusterState struct { CRDMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata BackendTLSPolicies map[types.NamespacedName]*v1alpha3.BackendTLSPolicy ConfigMaps map[types.NamespacedName]*v1.ConfigMap - NginxProxies map[types.NamespacedName]*ngfAPI.NginxProxy + NginxProxies map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy GRPCRoutes map[types.NamespacedName]*gatewayv1.GRPCRoute NGFPolicies map[PolicyKey]policies.Policy - SnippetsFilters map[types.NamespacedName]*ngfAPI.SnippetsFilter + SnippetsFilters map[types.NamespacedName]*ngfAPIv1alpha1.SnippetsFilter } // Graph is a Graph-like representation of Gateway API resources. type Graph struct { // GatewayClass holds the GatewayClass resource. GatewayClass *GatewayClass - // Gateway holds the winning Gateway resource. - Gateway *Gateway + // Gateways holds the all Gateway resource. + Gateways map[types.NamespacedName]*Gateway // IgnoredGatewayClasses holds the ignored GatewayClass resources, which reference NGINX Gateway Fabric in the // controllerName, but are not configured via the NGINX Gateway Fabric CLI argument. It doesn't hold the GatewayClass // resources that do not belong to the NGINX Gateway Fabric. IgnoredGatewayClasses map[types.NamespacedName]*gatewayv1.GatewayClass - // IgnoredGateways holds the ignored Gateway resources, which belong to the NGINX Gateway Fabric (based on the - // GatewayClassName field of the resource) but ignored. It doesn't hold the Gateway resources that do not belong to - // the NGINX Gateway Fabric. - IgnoredGateways map[types.NamespacedName]*gatewayv1.Gateway // Routes hold Route resources. Routes map[RouteKey]*L7Route // L4Routes hold L4Route resources. @@ -70,21 +66,24 @@ type Graph struct { ReferencedServices map[types.NamespacedName]*ReferencedService // ReferencedCaCertConfigMaps includes ConfigMaps that have been referenced by any BackendTLSPolicies. 
ReferencedCaCertConfigMaps map[types.NamespacedName]*CaCertConfigMap + // ReferencedNginxProxies includes NginxProxies that have been referenced by a GatewayClass or a Gateway. + ReferencedNginxProxies map[types.NamespacedName]*NginxProxy // BackendTLSPolicies holds BackendTLSPolicy resources. BackendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy - // NginxProxy holds the NginxProxy config for the GatewayClass. - NginxProxy *NginxProxy // NGFPolicies holds all NGF Policies. NGFPolicies map[PolicyKey]*Policy - // GlobalSettings contains global settings from the current state of the graph that may be - // needed for policy validation or generation if certain policies rely on those global settings. - GlobalSettings *policies.GlobalSettings // SnippetsFilters holds all the SnippetsFilters. SnippetsFilters map[types.NamespacedName]*SnippetsFilter // PlusSecrets holds the secrets related to NGINX Plus licensing. PlusSecrets map[types.NamespacedName][]PlusSecretFile } +// NginxReloadResult describes the result of an NGINX reload. +type NginxReloadResult struct { + // Error is the error that occurred during the reload. + Error error +} + // ProtectedPorts are the ports that may not be configured by a listener with a descriptive name of each port. type ProtectedPorts map[int32]string @@ -113,7 +112,7 @@ func (g *Graph) IsReferenced(resourceType ngftypes.ObjectType, nsname types.Name // `exists` does not cover the case highlighted above by `existed` and vice versa so both are needed. _, existed := g.ReferencedNamespaces[nsname] - exists := isNamespaceReferenced(obj, g.Gateway) + exists := isNamespaceReferenced(obj, g.Gateways) return existed || exists // Service reference exists if at least one HTTPRoute references it. 
case *v1.Service: @@ -126,9 +125,10 @@ func (g *Graph) IsReferenced(resourceType ngftypes.ObjectType, nsname types.Name // Service Namespace should be the same Namespace as the EndpointSlice _, exists := g.ReferencedServices[types.NamespacedName{Namespace: nsname.Namespace, Name: svcName}] return exists - // NginxProxy reference exists if it is linked to a GatewayClass. - case *ngfAPI.NginxProxy: - return isNginxProxyReferenced(nsname, g.GatewayClass) + // NginxProxy reference exists if the GatewayClass or Gateway references it. + case *ngfAPIv1alpha2.NginxProxy: + _, exists := g.ReferencedNginxProxies[nsname] + return exists default: return false } @@ -177,11 +177,11 @@ func (g *Graph) gatewayAPIResourceExist(ref v1alpha2.LocalPolicyTargetReference, switch kind := ref.Kind; kind { case kinds.Gateway: - if g.Gateway == nil { + if len(g.Gateways) == 0 { return false } - return gatewayExists(refNsName, g.Gateway.Source, g.IgnoredGateways) + return gatewayExists(refNsName, g.Gateways) case kinds.HTTPRoute, kinds.GRPCRoute: _, exists := g.Routes[routeKeyForKind(kind, refNsName)] return exists @@ -198,41 +198,46 @@ func BuildGraph( gcName string, plusSecrets map[types.NamespacedName][]PlusSecretFile, validators validation.Validators, - protectedPorts ProtectedPorts, ) *Graph { - var globalSettings *policies.GlobalSettings - processedGwClasses, gcExists := processGatewayClasses(state.GatewayClasses, gcName, controllerName) if gcExists && processedGwClasses.Winner == nil { // configured GatewayClass does not reference this controller return &Graph{} } - npCfg := buildNginxProxy(state.NginxProxies, processedGwClasses.Winner, validators.GenericValidator) - gc := buildGatewayClass(processedGwClasses.Winner, npCfg, state.CRDMetadata) - if gc != nil && npCfg != nil && npCfg.Source != nil { - spec := npCfg.Source.Spec - globalSettings = &policies.GlobalSettings{ - NginxProxyValid: npCfg.Valid, - TelemetryEnabled: spec.Telemetry != nil && spec.Telemetry.Exporter != nil, - } - } 
+ processedGws := processGateways(state.Gateways, gcName) + processedNginxProxies := processNginxProxies( + state.NginxProxies, + validators.GenericValidator, + processedGwClasses.Winner, + processedGws, + ) + + gc := buildGatewayClass( + processedGwClasses.Winner, + processedNginxProxies, + state.CRDMetadata, + ) secretResolver := newSecretResolver(state.Secrets) configMapResolver := newConfigMapResolver(state.ConfigMaps) - processedGws := processGateways(state.Gateways, gcName) - refGrantResolver := newReferenceGrantResolver(state.ReferenceGrants) - gw := buildGateway(processedGws.Winner, secretResolver, gc, refGrantResolver, protectedPorts) + gws := buildGateways( + processedGws, + secretResolver, + gc, + refGrantResolver, + processedNginxProxies, + ) processedBackendTLSPolicies := processBackendTLSPolicies( state.BackendTLSPolicies, configMapResolver, secretResolver, controllerName, - gw, + gws, ) processedSnippetsFilters := processSnippetsFilters(state.SnippetsFilters) @@ -241,77 +246,70 @@ func BuildGraph( validators.HTTPFieldsValidator, state.HTTPRoutes, state.GRPCRoutes, - processedGws.GetAllNsNames(), - npCfg, + gws, processedSnippetsFilters, ) l4routes := buildL4RoutesForGateways( state.TLSRoutes, - processedGws.GetAllNsNames(), state.Services, - npCfg, + gws, + refGrantResolver, + ) + + addBackendRefsToRouteRules( + routes, refGrantResolver, + state.Services, + processedBackendTLSPolicies, ) + bindRoutesToListeners(routes, l4routes, gws, state.Namespaces) - bindRoutesToListeners(routes, l4routes, gw, state.Namespaces) - addBackendRefsToRouteRules(routes, refGrantResolver, state.Services, processedBackendTLSPolicies, npCfg) + referencedNamespaces := buildReferencedNamespaces(state.Namespaces, gws) - referencedNamespaces := buildReferencedNamespaces(state.Namespaces, gw) + referencedServices := buildReferencedServices(routes, l4routes, gws) - referencedServices := buildReferencedServices(routes, l4routes, gw) + 
addGatewaysForBackendTLSPolicies(processedBackendTLSPolicies, referencedServices) // policies must be processed last because they rely on the state of the other resources in the graph processedPolicies := processPolicies( state.NGFPolicies, validators.PolicyValidator, - processedGws, routes, referencedServices, - globalSettings, + gws, ) setPlusSecretContent(state.Secrets, plusSecrets) g := &Graph{ GatewayClass: gc, - Gateway: gw, + Gateways: gws, Routes: routes, L4Routes: l4routes, IgnoredGatewayClasses: processedGwClasses.Ignored, - IgnoredGateways: processedGws.Ignored, ReferencedSecrets: secretResolver.getResolvedSecrets(), ReferencedNamespaces: referencedNamespaces, ReferencedServices: referencedServices, ReferencedCaCertConfigMaps: configMapResolver.getResolvedConfigMaps(), + ReferencedNginxProxies: processedNginxProxies, BackendTLSPolicies: processedBackendTLSPolicies, - NginxProxy: npCfg, NGFPolicies: processedPolicies, - GlobalSettings: globalSettings, SnippetsFilters: processedSnippetsFilters, PlusSecrets: plusSecrets, } - g.attachPolicies(controllerName) + g.attachPolicies(validators.PolicyValidator, controllerName) return g } -func gatewayExists( - gwNsName types.NamespacedName, - winner *gatewayv1.Gateway, - ignored map[types.NamespacedName]*gatewayv1.Gateway, -) bool { - if winner == nil { +func gatewayExists(gwNsName types.NamespacedName, gateways map[types.NamespacedName]*Gateway) bool { + if len(gateways) == 0 { return false } - if client.ObjectKeyFromObject(winner) == gwNsName { - return true - } - - _, exists := ignored[gwNsName] - + _, exists := gateways[gwNsName] return exists } diff --git a/internal/mode/static/state/graph/graph_test.go b/internal/mode/static/state/graph/graph_test.go index fe96db4402..0fdeecaeb3 100644 --- a/internal/mode/static/state/graph/graph_test.go +++ b/internal/mode/static/state/graph/graph_test.go @@ -17,7 +17,8 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha3" "sigs.k8s.io/gateway-api/apis/v1beta1" - ngfAPI 
"github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" @@ -35,11 +36,6 @@ func TestBuildGraph(t *testing.T) { controllerName = "my.controller" ) - protectedPorts := ProtectedPorts{ - 9113: "MetricsPort", - 8081: "HealthPort", - } - cm := &v1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", @@ -89,7 +85,7 @@ func TestBuildGraph(t *testing.T) { }, Valid: true, IsReferenced: true, - Gateway: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Gateways: []types.NamespacedName{{Namespace: testNs, Name: "gateway-1"}}, Conditions: btpAcceptedConds, CaCertRef: types.NamespacedName{Namespace: "service", Name: "configmap"}, } @@ -113,35 +109,35 @@ func TestBuildGraph(t *testing.T) { } refSnippetsFilterExtensionRef := &gatewayv1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "ref-snippets-filter", } - unreferencedSnippetsFilter := &ngfAPI.SnippetsFilter{ + unreferencedSnippetsFilter := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Name: "unref-snippets-filter", Namespace: testNs, }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextMain, + Context: ngfAPIv1alpha1.NginxContextMain, Value: "main snippet", }, }, }, } - referencedSnippetsFilter := &ngfAPI.SnippetsFilter{ + referencedSnippetsFilter := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Name: "ref-snippets-filter", Namespace: testNs, }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: 
ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextHTTPServer, + Context: ngfAPIv1alpha1.NginxContextHTTPServer, Value: "server snippet", }, }, @@ -152,8 +148,8 @@ func TestBuildGraph(t *testing.T) { Source: unreferencedSnippetsFilter, Valid: true, Referenced: false, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextMain: "main snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextMain: "main snippet", }, } @@ -161,19 +157,20 @@ func TestBuildGraph(t *testing.T) { Source: referencedSnippetsFilter, Valid: true, Referenced: true, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTPServer: "server snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTPServer: "server snippet", }, } createValidRuleWithBackendRefs := func(matches []gatewayv1.HTTPRouteMatch) RouteRule { refs := []BackendRef{ { - SvcNsName: types.NamespacedName{Namespace: "service", Name: "foo"}, - ServicePort: v1.ServicePort{Port: 80}, - Valid: true, - Weight: 1, - BackendTLSPolicy: &btp, + SvcNsName: types.NamespacedName{Namespace: "service", Name: "foo"}, + ServicePort: v1.ServicePort{Port: 80}, + Valid: true, + Weight: 1, + BackendTLSPolicy: &btp, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, } rbrs := []RouteBackendRef{ @@ -374,74 +371,107 @@ func TestBuildGraph(t *testing.T) { }, } - createGateway := func(name string) *gatewayv1.Gateway { - return &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNs, - Name: name, - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: gcName, - Listeners: []gatewayv1.Listener{ - { - Name: "listener-80-1", - Hostname: nil, - Port: 80, - Protocol: gatewayv1.HTTPProtocolType, - AllowedRoutes: &gatewayv1.AllowedRoutes{ - Namespaces: &gatewayv1.RouteNamespaces{ - From: helpers.GetPointer(gatewayv1.NamespacesFromSelector), - Selector: &metav1.LabelSelector{ - 
MatchLabels: map[string]string{ - "app": "allowed", + createGateway := func(name, nginxProxyName string) *Gateway { + return &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNs, + Name: name, + }, + Spec: gatewayv1.GatewaySpec{ + GatewayClassName: gcName, + Infrastructure: &gatewayv1.GatewayInfrastructure{ + ParametersRef: &gatewayv1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: nginxProxyName, + }, + }, + Listeners: []gatewayv1.Listener{ + { + Name: "listener-80-1", + Hostname: nil, + Port: 80, + Protocol: gatewayv1.HTTPProtocolType, + AllowedRoutes: &gatewayv1.AllowedRoutes{ + Namespaces: &gatewayv1.RouteNamespaces{ + From: helpers.GetPointer(gatewayv1.NamespacesFromSelector), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "allowed", + }, }, }, }, }, - }, - { - Name: "listener-443-1", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.com")), - Port: 443, - TLS: &gatewayv1.GatewayTLSConfig{ - Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), - CertificateRefs: []gatewayv1.SecretObjectReference{ - { - Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), - Name: gatewayv1.ObjectName(secret.Name), - Namespace: helpers.GetPointer(gatewayv1.Namespace(secret.Namespace)), + { + Name: "listener-443-1", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.com")), + Port: 443, + TLS: &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secret.Name), + Namespace: helpers.GetPointer(gatewayv1.Namespace(secret.Namespace)), + }, }, }, + Protocol: gatewayv1.HTTPSProtocolType, }, - Protocol: gatewayv1.HTTPSProtocolType, - }, - { - Name: "listener-443-2", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), - Port: 443, - Protocol: 
gatewayv1.TLSProtocolType, - TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, - AllowedRoutes: &gatewayv1.AllowedRoutes{ - Kinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + { + Name: "listener-443-2", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), + Port: 443, + Protocol: gatewayv1.TLSProtocolType, + TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + AllowedRoutes: &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, }, - }, - { - Name: "listener-8443", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), - Port: 8443, - Protocol: gatewayv1.TLSProtocolType, - TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + { + Name: "listener-8443", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), + Port: 8443, + Protocol: gatewayv1.TLSProtocolType, + TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + }, }, }, }, } } - gw1 := createGateway("gateway-1") - gw2 := createGateway("gateway-2") + gw1 := createGateway("gateway-1", "np-1") + gw2 := createGateway("gateway-2", "np-2") + + // np1 is referenced by gw1 and sets the nginx error log to error. + np1 := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-1", + Namespace: testNs, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + }, + } + + // np2 is referenced by gw2 and sets the IPFamily to IPv6. 
+ np2 := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-2", + Namespace: testNs, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), + }, + } svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -532,26 +562,47 @@ func TestBuildGraph(t *testing.T) { }, } - proxy := &ngfAPI.NginxProxy{ + // npGlobal is referenced by the gateway class, and we expect it to be configured and merged with np1. + npGlobal := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy", + Name: "np-global", + Namespace: testNs, }, - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Endpoint: "1.2.3.4:123", - Interval: helpers.GetPointer(ngfAPI.Duration("5s")), + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), BatchSize: helpers.GetPointer(int32(512)), BatchCount: helpers.GetPointer(int32(4)), }, ServiceName: helpers.GetPointer("my-svc"), - SpanAttributes: []ngfAPI.SpanAttribute{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "key", Value: "value"}, }, }, }, } + // np1Effective is the combined NginxProxy of npGlobal and np1 + np1Effective := &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + } + // NGF Policies // // We have to use real policies here 
instead of a mocks because the Diff function we use in the test fails when @@ -559,13 +610,13 @@ func TestBuildGraph(t *testing.T) { // Testing one type of policy per attachment point should suffice. polGVK := schema.GroupVersionKind{Kind: kinds.ClientSettingsPolicy} hrPolicyKey := PolicyKey{GVK: polGVK, NsName: types.NamespacedName{Namespace: testNs, Name: "hrPolicy"}} - hrPolicy := &ngfAPI.ClientSettingsPolicy{ + hrPolicy := &ngfAPIv1alpha1.ClientSettingsPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "hrPolicy", Namespace: testNs, }, TypeMeta: metav1.TypeMeta{Kind: kinds.ClientSettingsPolicy}, - Spec: ngfAPI.ClientSettingsPolicySpec{ + Spec: ngfAPIv1alpha1.ClientSettingsPolicySpec{ TargetRef: createTestRef(kinds.HTTPRoute, gatewayv1.GroupName, "hr-1"), }, } @@ -588,17 +639,18 @@ func TestBuildGraph(t *testing.T) { Nsname: types.NamespacedName{Namespace: testNs, Name: "hr-1"}, }, }, - Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, } gwPolicyKey := PolicyKey{GVK: polGVK, NsName: types.NamespacedName{Namespace: testNs, Name: "gwPolicy"}} - gwPolicy := &ngfAPI.ClientSettingsPolicy{ + gwPolicy := &ngfAPIv1alpha1.ClientSettingsPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "gwPolicy", Namespace: testNs, }, TypeMeta: metav1.TypeMeta{Kind: kinds.ClientSettingsPolicy}, - Spec: ngfAPI.ClientSettingsPolicySpec{ + Spec: ngfAPIv1alpha1.ClientSettingsPolicySpec{ TargetRef: createTestRef(kinds.Gateway, gatewayv1.GroupName, "gateway-1"), }, } @@ -621,7 +673,8 @@ func TestBuildGraph(t *testing.T) { Nsname: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, }, }, - Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, } createStateWithGatewayClass := func(gc *gatewayv1.GatewayClass) ClusterState { @@ -630,8 +683,8 @@ func TestBuildGraph(t *testing.T) { client.ObjectKeyFromObject(gc): gc, }, Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ - client.ObjectKeyFromObject(gw1): gw1, - 
client.ObjectKeyFromObject(gw2): gw2, + client.ObjectKeyFromObject(gw1.Source): gw1.Source, + client.ObjectKeyFromObject(gw2.Source): gw2.Source, }, HTTPRoutes: map[types.NamespacedName]*gatewayv1.HTTPRoute{ client.ObjectKeyFromObject(hr1): hr1, @@ -667,14 +720,16 @@ func TestBuildGraph(t *testing.T) { ConfigMaps: map[types.NamespacedName]*v1.ConfigMap{ client.ObjectKeyFromObject(cm): cm, }, - NginxProxies: map[types.NamespacedName]*ngfAPI.NginxProxy{ - client.ObjectKeyFromObject(proxy): proxy, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(npGlobal): npGlobal, + client.ObjectKeyFromObject(np1): np1, + client.ObjectKeyFromObject(np2): np2, }, NGFPolicies: map[PolicyKey]policies.Policy{ hrPolicyKey: hrPolicy, gwPolicyKey: gwPolicy, }, - SnippetsFilters: map[types.NamespacedName]*ngfAPI.SnippetsFilter{ + SnippetsFilters: map[types.NamespacedName]*ngfAPIv1alpha1.SnippetsFilter{ client.ObjectKeyFromObject(unreferencedSnippetsFilter): unreferencedSnippetsFilter, client.ObjectKeyFromObject(referencedSnippetsFilter): referencedSnippetsFilter, }, @@ -688,13 +743,21 @@ func TestBuildGraph(t *testing.T) { Source: hr1, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: hr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ - Attached: true, - AcceptedHostnames: map[string][]string{"listener-80-1": {"foo.example.com"}}, - ListenerPort: 80, + Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-80-1", + ): {"foo.example.com"}, + }, + ListenerPort: 80, }, }, }, @@ -711,13 +774,22 @@ func TestBuildGraph(t *testing.T) { Source: tr, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: 
&ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443-2": {"fizz.example.org"}, - "listener-8443": {"fizz.example.org"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-443-2", + ): {"fizz.example.org"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-8443", + ): {"fizz.example.org"}, }, }, }, @@ -732,7 +804,8 @@ func TestBuildGraph(t *testing.T) { ServicePort: v1.ServicePort{ Port: 80, }, - Valid: true, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, } @@ -743,12 +816,15 @@ func TestBuildGraph(t *testing.T) { Source: tr2, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, Attachment: &ParentRefAttachmentStatus{ Attached: false, AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteHostnameConflict(), + FailedConditions: []conditions.Condition{staticConds.NewRouteHostnameConflict()}, }, }, }, @@ -762,7 +838,8 @@ func TestBuildGraph(t *testing.T) { ServicePort: v1.ServicePort{ Port: 80, }, - Valid: true, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, } @@ -774,13 +851,21 @@ func TestBuildGraph(t *testing.T) { Source: gr, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: gr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ - Attached: true, - AcceptedHostnames: map[string][]string{"listener-80-1": {"bar.example.com"}}, - ListenerPort: 80, + 
Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-80-1", + ): {"bar.example.com"}, + }, + ListenerPort: 80, }, }, }, @@ -799,13 +884,21 @@ func TestBuildGraph(t *testing.T) { Source: hr3, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: hr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ - Attached: true, - AcceptedHostnames: map[string][]string{"listener-443-1": {"foo.example.com"}}, - ListenerPort: 443, + Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-443-1", + ): {"foo.example.com"}, + }, + ListenerPort: 443, }, }, }, @@ -826,61 +919,170 @@ func TestBuildGraph(t *testing.T) { Source: gc, Valid: true, Conditions: []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + NginxProxy: &NginxProxy{ + Source: npGlobal, + Valid: true, + }, }, - Gateway: &Gateway{ - Source: gw1, - Listeners: []*Listener{ - { - Name: "listener-80-1", - Source: gw1.Spec.Listeners[0], - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{ - CreateRouteKey(hr1): routeHR1, - CreateRouteKey(gr): routeGR, + Gateways: map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gateway-1"}: { + Source: gw1.Source, + Listeners: []*Listener{ + { + Name: "listener-80-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{ + CreateRouteKey(hr1): routeHR1, + CreateRouteKey(gr): routeGR, + }, + SupportedKinds: supportedKindsForListeners, + L4Routes: map[L4RouteKey]*L4Route{}, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": 
"allowed"}), + }, + { + Name: "listener-443-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{CreateRouteKey(hr3): routeHR3}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "listener-443-2", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[2], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + }, + { + Name: "listener-8443", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[3], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, - SupportedKinds: supportedKindsForListeners, - L4Routes: map[L4RouteKey]*L4Route{}, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": "allowed"}), }, - { - Name: "listener-443-1", - Source: gw1.Spec.Listeners[1], - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{CreateRouteKey(hr3): routeHR3}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Policies: []*Policy{processedGwPolicy}, + NginxProxy: &NginxProxy{ + Source: np1, + Valid: true, }, - { - Name: "listener-443-2", - Source: gw1.Spec.Listeners[2], - Valid: true, - Attachable: true, - 
L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, - Routes: map[RouteKey]*L7Route{}, - SupportedKinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, - { - Name: "listener-8443", - Source: gw1.Spec.Listeners[3], - Valid: true, - Attachable: true, - L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, - Routes: map[RouteKey]*L7Route{}, - SupportedKinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-my-class", + }, + }, + {Namespace: testNs, Name: "gateway-2"}: { + Source: gw2.Source, + Listeners: []*Listener{ + { + Name: "listener-80-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: supportedKindsForListeners, + L4Routes: map[L4RouteKey]*L4Route{}, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": "allowed"}), + }, + { + Name: "listener-443-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[1], + Valid: true, + 
Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "listener-443-2", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[2], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + }, + { + Name: "listener-8443", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[3], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: np2, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, + }, + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-my-class", + }, }, - Valid: true, - Policies: []*Policy{processedGwPolicy}, - }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Namespace: testNs, Name: "gateway-2"}: gw2, }, Routes: 
map[RouteKey]*L7Route{ CreateRouteKey(hr1): routeHR1, @@ -904,8 +1106,12 @@ func TestBuildGraph(t *testing.T) { client.ObjectKeyFromObject(ns): ns, }, ReferencedServices: map[types.NamespacedName]*ReferencedService{ - client.ObjectKeyFromObject(svc): {}, - client.ObjectKeyFromObject(svc1): {}, + client.ObjectKeyFromObject(svc): { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: testNs, Name: "gateway-1"}: {}}, + }, + client.ObjectKeyFromObject(svc1): { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: testNs, Name: "gateway-1"}: {}}, + }, }, ReferencedCaCertConfigMaps: map[types.NamespacedName]*CaCertConfigMap{ client.ObjectKeyFromObject(cm): { @@ -918,18 +1124,24 @@ func TestBuildGraph(t *testing.T) { BackendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ client.ObjectKeyFromObject(btp.Source): &btp, }, - NginxProxy: &NginxProxy{ - Source: proxy, - Valid: true, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(npGlobal): { + Source: npGlobal, + Valid: true, + }, + client.ObjectKeyFromObject(np1): { + Source: np1, + Valid: true, + }, + client.ObjectKeyFromObject(np2): { + Source: np2, + Valid: true, + }, }, NGFPolicies: map[PolicyKey]*Policy{ hrPolicyKey: processedRoutePolicy, gwPolicyKey: processedGwPolicy, }, - GlobalSettings: &policies.GlobalSettings{ - NginxProxyValid: true, - TelemetryEnabled: true, - }, SnippetsFilters: map[types.NamespacedName]*SnippetsFilter{ client.ObjectKeyFromObject(unreferencedSnippetsFilter): processedUnrefSnippetsFilter, client.ObjectKeyFromObject(referencedSnippetsFilter): processedRefSnippetsFilter, @@ -953,9 +1165,10 @@ func TestBuildGraph(t *testing.T) { Spec: gatewayv1.GatewayClassSpec{ ControllerName: controllerName, ParametersRef: &gatewayv1.ParametersReference{ - Group: gatewayv1.Group("gateway.nginx.org"), - Kind: gatewayv1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", + Group: gatewayv1.Group("gateway.nginx.org"), + Kind: 
gatewayv1.Kind(kinds.NginxProxy), + Name: "np-global", + Namespace: helpers.GetPointer(gatewayv1.Namespace(testNs)), }, }, } @@ -1011,7 +1224,6 @@ func TestBuildGraph(t *testing.T) { GenericValidator: &validationfakes.FakeGenericValidator{}, PolicyValidator: fakePolicyValidator, }, - protectedPorts, ) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) @@ -1095,15 +1307,17 @@ func TestIsReferenced(t *testing.T) { endpointSliceNotInGraph := createEndpointSlice("endpointSliceNotInGraph", "serviceNotInGraph") emptyEndpointSlice := &discoveryV1.EndpointSlice{} - gw := &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + gw := map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, }, + Valid: true, }, - Valid: true, } nsNotInGraphButInGateway := &v1.Namespace{ @@ -1134,32 +1348,20 @@ func TestIsReferenced(t *testing.T) { }, } - gcWithNginxProxy := &GatewayClass{ - Source: &gatewayv1.GatewayClass{ - Spec: gatewayv1.GatewayClassSpec{ - ParametersRef: &gatewayv1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: gatewayv1.Kind(kinds.NginxProxy), - Name: "nginx-proxy-in-gc", - }, - }, - }, - } - - npNotInGatewayClass := &ngfAPI.NginxProxy{ + npNotReferenced := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy", + Name: "nginx-proxy-not-ref", }, } - npInGatewayClass := &ngfAPI.NginxProxy{ + npReferenced := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy-in-gc", + Name: "nginx-proxy-ref", }, } graph := &Graph{ - Gateway: gw, + Gateways: gw, ReferencedSecrets: map[types.NamespacedName]*Secret{ client.ObjectKeyFromObject(baseSecret): { Source: baseSecret, @@ -1179,6 +1381,11 @@ func TestIsReferenced(t *testing.T) { }), }, }, + 
ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(npReferenced): { + Source: npReferenced, + }, + }, } tests := []struct { @@ -1309,16 +1516,14 @@ func TestIsReferenced(t *testing.T) { // NginxProxy tests { - name: "NginxProxy is referenced in GatewayClass", - resource: npInGatewayClass, - gc: gcWithNginxProxy, + name: "NginxProxy is referenced", + resource: npReferenced, graph: graph, expected: true, }, { - name: "NginxProxy is not referenced in GatewayClass", - resource: npNotInGatewayClass, - gc: gcWithNginxProxy, + name: "NginxProxy is not referenced", + resource: npNotReferenced, graph: graph, expected: false, }, @@ -1352,17 +1557,16 @@ func TestIsNGFPolicyRelevant(t *testing.T) { getGraph := func() *Graph { return &Graph{ - Gateway: &Gateway{ - Source: &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "test", + Gateways: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gw"}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "test", + }, }, }, }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Namespace: "test", Name: "ignored"}: {}, - }, Routes: map[RouteKey]*L7Route{ hrKey: {}, grKey: {}, @@ -1421,13 +1625,6 @@ func TestIsNGFPolicyRelevant(t *testing.T) { nsname: types.NamespacedName{Namespace: "test", Name: "ref-gw"}, expRelevant: true, }, - { - name: "relevant; policy references an ignored gateway", - graph: getGraph(), - policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "ignored")), - nsname: types.NamespacedName{Namespace: "test", Name: "ref-ignored"}, - expRelevant: true, - }, { name: "relevant; policy references an httproute in the graph", graph: getGraph(), @@ -1466,7 +1663,7 @@ func TestIsNGFPolicyRelevant(t *testing.T) { { name: "irrelevant; policy references a Gateway, but the graph's Gateway is nil", graph: getModifiedGraph(func(g *Graph) *Graph { - g.Gateway = nil + g.Gateways = nil 
return g }), policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "diff")), @@ -1476,7 +1673,8 @@ func TestIsNGFPolicyRelevant(t *testing.T) { { name: "irrelevant; policy references a Gateway, but the graph's Gateway.Source is nil", graph: getModifiedGraph(func(g *Graph) *Graph { - g.Gateway.Source = nil + gw := g.Gateways[types.NamespacedName{Namespace: "test", Name: "gw"}] + gw.Source = nil return g }), policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "diff")), @@ -1549,3 +1747,38 @@ func TestIsNGFPolicyRelevantPanics(t *testing.T) { g.Expect(isRelevant).To(Panic()) } + +func TestGatewayExists(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tests := []struct { + gateways map[types.NamespacedName]*Gateway + gwNsName types.NamespacedName + name string + expectedResult bool + }{ + { + name: "gateway exists", + gwNsName: types.NamespacedName{Namespace: "test", Name: "gw"}, + gateways: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gw"}: {}, + {Namespace: "test", Name: "gw2"}: {}, + }, + expectedResult: true, + }, + { + name: "gateway does not exist", + gwNsName: types.NamespacedName{Namespace: "test", Name: "gw"}, + gateways: nil, + expectedResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g.Expect(gatewayExists(test.gwNsName, test.gateways)).To(Equal(test.expectedResult)) + }) + } +} diff --git a/internal/mode/static/state/graph/grpcroute.go b/internal/mode/static/state/graph/grpcroute.go index aaacdb0fff..f114a130ee 100644 --- a/internal/mode/static/state/graph/grpcroute.go +++ b/internal/mode/static/state/graph/grpcroute.go @@ -19,8 +19,7 @@ import ( func buildGRPCRoute( validator validation.HTTPFieldsValidator, ghr *v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, - http2disabled bool, + gws map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) *L7Route { r := &L7Route{ @@ -28,7 +27,7 @@ func 
buildGRPCRoute( RouteType: RouteTypeGRPC, } - sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gws) if err != nil { r.Valid = false @@ -40,14 +39,6 @@ func buildGRPCRoute( } r.ParentRefs = sectionNameRefs - if http2disabled { - r.Valid = false - msg := "HTTP2 is disabled - cannot configure GRPCRoutes" - r.Conditions = append(r.Conditions, staticConds.NewRouteUnsupportedConfiguration(msg)) - - return r - } - if err := validateHostnames( ghr.Spec.Hostnames, field.NewPath("spec").Child("hostnames"), @@ -78,9 +69,8 @@ func buildGRPCMirrorRoutes( routes map[RouteKey]*L7Route, l7route *L7Route, route *v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, - http2disabled bool, ) { for idx, rule := range l7route.Spec.Rules { if rule.Filters.Valid { @@ -110,8 +100,7 @@ func buildGRPCMirrorRoutes( mirrorRoute := buildGRPCRoute( validation.SkipValidator{}, tmpMirrorRoute, - gatewayNsNames, - http2disabled, + gateways, snippetsFilters, ) diff --git a/internal/mode/static/state/graph/grpcroute_test.go b/internal/mode/static/state/graph/grpcroute_test.go index 5417208d9e..255a201186 100644 --- a/internal/mode/static/state/graph/grpcroute_test.go +++ b/internal/mode/static/state/graph/grpcroute_test.go @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" @@ -84,12 +84,27 @@ func TestBuildGRPCRoutes(t *testing.T) { t.Parallel() gwNsName := 
types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + snippetsFilterRef := v1.GRPCRouteFilter{ Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ Name: "sf", Kind: kinds.SnippetsFilter, - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, }, } @@ -111,15 +126,15 @@ func TestBuildGRPCRoutes(t *testing.T) { client.ObjectKeyFromObject(grWrongGateway): grWrongGateway, } - sf := &ngfAPI.SnippetsFilter{ + sf := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "sf", }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextHTTP, + Context: ngfAPIv1alpha1.NginxContextHTTP, Value: "http snippet", }, }, @@ -127,12 +142,12 @@ func TestBuildGRPCRoutes(t *testing.T) { } tests := []struct { - expected map[RouteKey]*L7Route - name string - gwNsNames []types.NamespacedName + expected map[RouteKey]*L7Route + gateways map[types.NamespacedName]*Gateway + name string }{ { - gwNsNames: []types.NamespacedName{gwNsName}, + gateways: gateways, expected: map[RouteKey]*L7Route{ CreateRouteKey(gr): { RouteType: RouteTypeGRPC, @@ -140,7 +155,7 @@ func TestBuildGRPCRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: CreateParentRefGateway(gateways[gwNsName]), SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -159,8 +174,8 @@ func TestBuildGRPCRoutes(t *testing.T) { ResolvedExtensionRef: &ExtensionRefFilter{ SnippetsFilter: &SnippetsFilter{ Source: sf, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTP: "http snippet", + Snippets: 
map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTP: "http snippet", }, Valid: true, Referenced: true, @@ -187,22 +202,14 @@ func TestBuildGRPCRoutes(t *testing.T) { name: "normal case", }, { - gwNsNames: []types.NamespacedName{}, - expected: nil, - name: "no gateways", + gateways: nil, + expected: nil, + name: "no gateways", }, } validator := &validationfakes.FakeHTTPFieldsValidator{} - npCfg := &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - DisableHTTP2: false, - }, - }, - } - for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -212,8 +219,8 @@ func TestBuildGRPCRoutes(t *testing.T) { client.ObjectKeyFromObject(sf): { Source: sf, Valid: true, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTP: "http snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTP: "http snippet", }, }, } @@ -222,8 +229,7 @@ func TestBuildGRPCRoutes(t *testing.T) { validator, map[types.NamespacedName]*v1.HTTPRoute{}, grRoutes, - test.gwNsNames, - npCfg, + test.gateways, snippetsFilters, ) g.Expect(helpers.Diff(test.expected, routes)).To(BeEmpty()) @@ -233,7 +239,20 @@ func TestBuildGRPCRoutes(t *testing.T) { func TestBuildGRPCRoute(t *testing.T) { t.Parallel() - gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + + gw := &Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + } + gatewayNsName := client.ObjectKeyFromObject(gw.Source) methodMatchRule := createGRPCMethodMatch("myService", "myMethod", "Exact") headersMatchRule := createGRPCHeadersMatch("Exact", "MyHeader", "SomeValue") @@ -353,7 +372,7 @@ func TestBuildGRPCRoute(t *testing.T) { grValidFilterRule := createGRPCMethodMatch("myService", "myMethod", "Exact") grValidHeaderMatch := 
createGRPCHeadersMatch("RegularExpression", "MyHeader", "headers-[a-z]+") validSnippetsFilterRef := &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "sf", } @@ -414,7 +433,7 @@ func TestBuildGRPCRoute(t *testing.T) { { Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "does-not-exist", }, @@ -433,7 +452,7 @@ func TestBuildGRPCRoute(t *testing.T) { { Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "does-not-exist", }, @@ -492,11 +511,10 @@ func TestBuildGRPCRoute(t *testing.T) { } tests := []struct { - validator *validationfakes.FakeHTTPFieldsValidator - gr *v1.GRPCRoute - expected *L7Route - name string - http2disabled bool + validator *validationfakes.FakeHTTPFieldsValidator + gr *v1.GRPCRoute + expected *L7Route + name string }{ { validator: createAllValidValidator(), @@ -507,7 +525,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grBoth.Spec.ParentRefs[0].SectionName, }, }, @@ -548,7 +566,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grEmptyMatch.Spec.ParentRefs[0].SectionName, }, }, @@ -580,7 +598,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grValidFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -623,7 +641,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: 
grInvalidMatchesEmptyMethodFields.Spec.ParentRefs[0].SectionName, }, }, @@ -667,7 +685,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidMatchesInvalidMethodFields.Spec.ParentRefs[0].SectionName, }, }, @@ -704,28 +722,6 @@ func TestBuildGRPCRoute(t *testing.T) { }, name: "invalid route with duplicate sectionName", }, - { - validator: createAllValidValidator(), - gr: grBoth, - expected: &L7Route{ - RouteType: RouteTypeGRPC, - Source: grBoth, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: gatewayNsName, - SectionName: grBoth.Spec.ParentRefs[0].SectionName, - }, - }, - Conditions: []conditions.Condition{ - staticConds.NewRouteUnsupportedConfiguration( - `HTTP2 is disabled - cannot configure GRPCRoutes`, - ), - }, - }, - http2disabled: true, - name: "invalid route with disabled http2", - }, { validator: createAllValidValidator(), gr: grOneInvalid, @@ -737,7 +733,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grOneInvalid.Spec.ParentRefs[0].SectionName, }, }, @@ -783,7 +779,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHeadersInvalidType.Spec.ParentRefs[0].SectionName, }, }, @@ -821,7 +817,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHeadersEmptyType.Spec.ParentRefs[0].SectionName, }, }, @@ -859,7 +855,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidMatchesNilMethodType.Spec.ParentRefs[0].SectionName, }, }, @@ -896,7 +892,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: 
[]ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -941,7 +937,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHostname.Spec.ParentRefs[0].SectionName, }, }, @@ -964,7 +960,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -1002,7 +998,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -1041,7 +1037,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidAndUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -1075,7 +1071,9 @@ func TestBuildGRPCRoute(t *testing.T) { }, } - gatewayNsNames := []types.NamespacedName{gatewayNsName} + gws := map[types.NamespacedName]*Gateway{ + gatewayNsName: gw, + } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1085,7 +1083,7 @@ func TestBuildGRPCRoute(t *testing.T) { snippetsFilters := map[types.NamespacedName]*SnippetsFilter{ {Namespace: "test", Name: "sf"}: {Valid: true}, } - route := buildGRPCRoute(test.validator, test.gr, gatewayNsNames, test.http2disabled, snippetsFilters) + route := buildGRPCRoute(test.validator, test.gr, gws, snippetsFilters) g.Expect(helpers.Diff(test.expected, route)).To(BeEmpty()) }) } @@ -1093,8 +1091,24 @@ func TestBuildGRPCRoute(t *testing.T) { func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { t.Parallel() + gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + 
gateways := map[types.NamespacedName]*Gateway{ + gatewayNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + // Create a route with a request mirror filter and another random filter mirrorFilter := v1.GRPCRouteFilter{ Type: v1.GRPCRouteFilterRequestMirror, @@ -1172,7 +1186,7 @@ func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gateways[gatewayNsName]), SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -1217,16 +1231,15 @@ func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { } validator := &validationfakes.FakeHTTPFieldsValidator{} - gatewayNsNames := []types.NamespacedName{gatewayNsName} snippetsFilters := map[types.NamespacedName]*SnippetsFilter{} g := NewWithT(t) routes := map[RouteKey]*L7Route{} - l7route := buildGRPCRoute(validator, gr, gatewayNsNames, false, snippetsFilters) + l7route := buildGRPCRoute(validator, gr, gateways, snippetsFilters) g.Expect(l7route).NotTo(BeNil()) - buildGRPCMirrorRoutes(routes, l7route, gr, gatewayNsNames, snippetsFilters, false) + buildGRPCMirrorRoutes(routes, l7route, gr, gateways, snippetsFilters) obj, ok := expectedMirrorRoute.Source.(*v1.GRPCRoute) g.Expect(ok).To(BeTrue()) diff --git a/internal/mode/static/state/graph/httproute.go b/internal/mode/static/state/graph/httproute.go index f10df6965b..408d742dd5 100644 --- a/internal/mode/static/state/graph/httproute.go +++ b/internal/mode/static/state/graph/httproute.go @@ -25,7 +25,7 @@ var ( func buildHTTPRoute( validator validation.HTTPFieldsValidator, ghr *v1.HTTPRoute, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) *L7Route { r := &L7Route{ @@ -33,7 +33,7 @@ func buildHTTPRoute( RouteType: 
RouteTypeHTTP, } - sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gws) if err != nil { r.Valid = false @@ -75,7 +75,7 @@ func buildHTTPMirrorRoutes( routes map[RouteKey]*L7Route, l7route *L7Route, route *v1.HTTPRoute, - gatewayNsNames []types.NamespacedName, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) { for idx, rule := range l7route.Spec.Rules { @@ -106,7 +106,7 @@ func buildHTTPMirrorRoutes( mirrorRoute := buildHTTPRoute( validation.SkipValidator{}, tmpMirrorRoute, - gatewayNsNames, + gateways, snippetsFilters, ) diff --git a/internal/mode/static/state/graph/httproute_test.go b/internal/mode/static/state/graph/httproute_test.go index ce0f794bc6..ee0d2dac27 100644 --- a/internal/mode/static/state/graph/httproute_test.go +++ b/internal/mode/static/state/graph/httproute_test.go @@ -91,8 +91,21 @@ func addFilterToPath(hr *gatewayv1.HTTPRoute, path string, filter gatewayv1.HTTP func TestBuildHTTPRoutes(t *testing.T) { t.Parallel() + gwNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + }, + } + hr := createHTTPRoute("hr-1", gwNsName.Name, "example.com", "/") snippetsFilterRef := gatewayv1.HTTPRouteFilter{ Type: gatewayv1.HTTPRouteFilterExtensionRef, @@ -133,12 +146,12 @@ func TestBuildHTTPRoutes(t *testing.T) { } tests := []struct { - expected map[RouteKey]*L7Route - name string - gwNsNames []types.NamespacedName + expected map[RouteKey]*L7Route + gateways map[types.NamespacedName]*Gateway + name string }{ { - gwNsNames: []types.NamespacedName{gwNsName}, + gateways: gateways, expected: map[RouteKey]*L7Route{ CreateRouteKey(hr): { Source: hr, @@ -146,7 +159,7 @@ func 
TestBuildHTTPRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: CreateParentRefGateway(gateways[gwNsName]), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -193,9 +206,9 @@ func TestBuildHTTPRoutes(t *testing.T) { name: "normal case", }, { - gwNsNames: []types.NamespacedName{}, - expected: nil, - name: "no gateways", + gateways: map[types.NamespacedName]*Gateway{}, + expected: nil, + name: "no gateways", }, } @@ -220,8 +233,7 @@ func TestBuildHTTPRoutes(t *testing.T) { validator, hrRoutes, map[types.NamespacedName]*gatewayv1.GRPCRoute{}, - test.gwNsNames, - nil, + test.gateways, snippetsFilters, ) g.Expect(helpers.Diff(test.expected, routes)).To(BeEmpty()) @@ -236,7 +248,16 @@ func TestBuildHTTPRoute(t *testing.T) { invalidRedirectHostname = "invalid.example.com" ) - gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gw := &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + } + gatewayNsName := client.ObjectKeyFromObject(gw.Source) // route with valid filter validFilter := gatewayv1.HTTPRouteFilter{ @@ -358,7 +379,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -401,7 +422,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatchesEmptyPathType.Spec.ParentRefs[0].SectionName, }, }, @@ -447,7 +468,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatchesEmptyPathValue.Spec.ParentRefs[0].SectionName, }, }, @@ -490,7 +511,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + 
Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidHostname.Spec.ParentRefs[0].SectionName, }, }, @@ -513,7 +534,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatches.Spec.ParentRefs[0].SectionName, }, }, @@ -550,7 +571,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -588,7 +609,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidMatches.Spec.ParentRefs[0].SectionName, }, }, @@ -635,7 +656,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidMatchesAndInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -694,7 +715,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -741,7 +762,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrValidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -783,7 +804,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -821,7 +842,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ 
-860,7 +881,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidAndUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -896,7 +917,9 @@ func TestBuildHTTPRoute(t *testing.T) { }, } - gatewayNsNames := []types.NamespacedName{gatewayNsName} + gws := map[types.NamespacedName]*Gateway{ + gatewayNsName: gw, + } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -907,7 +930,7 @@ func TestBuildHTTPRoute(t *testing.T) { {Namespace: "test", Name: "sf"}: {Valid: true}, } - route := buildHTTPRoute(test.validator, test.hr, gatewayNsNames, snippetsFilters) + route := buildHTTPRoute(test.validator, test.hr, gws, snippetsFilters) g.Expect(helpers.Diff(test.expected, route)).To(BeEmpty()) }) } @@ -915,8 +938,24 @@ func TestBuildHTTPRoute(t *testing.T) { func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { t.Parallel() + gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gatewayNsName: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + // Create a route with a request mirror filter and another random filter mirrorFilter := gatewayv1.HTTPRouteFilter{ Type: gatewayv1.HTTPRouteFilterRequestMirror, @@ -974,7 +1013,7 @@ func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gateways[gatewayNsName]), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -1018,16 +1057,15 @@ func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { } validator := &validationfakes.FakeHTTPFieldsValidator{} - gatewayNsNames := []types.NamespacedName{gatewayNsName} snippetsFilters := 
map[types.NamespacedName]*SnippetsFilter{} g := NewWithT(t) routes := map[RouteKey]*L7Route{} - l7route := buildHTTPRoute(validator, hr, gatewayNsNames, snippetsFilters) + l7route := buildHTTPRoute(validator, hr, gateways, snippetsFilters) g.Expect(l7route).NotTo(BeNil()) - buildHTTPMirrorRoutes(routes, l7route, hr, gatewayNsNames, snippetsFilters) + buildHTTPMirrorRoutes(routes, l7route, hr, gateways, snippetsFilters) obj, ok := expectedMirrorRoute.Source.(*gatewayv1.HTTPRoute) g.Expect(ok).To(BeTrue()) diff --git a/internal/mode/static/state/graph/multiple_gateways_test.go b/internal/mode/static/state/graph/multiple_gateways_test.go new file mode 100644 index 0000000000..497fbd9680 --- /dev/null +++ b/internal/mode/static/state/graph/multiple_gateways_test.go @@ -0,0 +1,895 @@ +package graph + +import ( + "testing" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + "sigs.k8s.io/gateway-api/apis/v1beta1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation/validationfakes" +) + +const ( + controllerName = "nginx" + gcName = "my-gateway-class" +) + +var ( + plusSecret = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ngf", + Name: "plus-secret", + }, + Data: map[string][]byte{ + "license.jwt": []byte("license"), + }, + } + convertedPlusSecret = 
map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + Content: []byte("license"), + FieldName: "license.jwt", + }, + }, + } + + supportedHTTPGRPC = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + {Kind: gatewayv1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + supportedTLS = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + + allowedRoutesHTTPGRPC = &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + {Kind: kinds.GRPCRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + } + allowedRoutesTLS = &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + } +) + +func createGateway(name, namespace, nginxProxyName string, listeners []gatewayv1.Listener) *gatewayv1.Gateway { + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: gatewayv1.GatewaySpec{ + GatewayClassName: gcName, + Listeners: listeners, + }, + } + + if nginxProxyName != "" { + gateway.Spec.Infrastructure = &gatewayv1.GatewayInfrastructure{ + ParametersRef: &gatewayv1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: nginxProxyName, + }, + } + } + + return gateway +} + +func createGatewayClass(name, controllerName, npName, npNamespace string) *gatewayv1.GatewayClass { + if npName == "" { + return &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: gatewayv1.GatewayController(controllerName), + }, + } + } + 
return &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: gatewayv1.GatewayController(controllerName), + ParametersRef: &gatewayv1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: npName, + Namespace: helpers.GetPointer(gatewayv1.Namespace(npNamespace)), + }, + }, + } +} + +func convertedGatewayClass( + gc *gatewayv1.GatewayClass, + nginxProxy ngfAPIv1alpha2.NginxProxy, + cond ...conditions.Condition, +) *GatewayClass { + return &GatewayClass{ + Source: gc, + NginxProxy: &NginxProxy{ + Source: &nginxProxy, + Valid: true, + }, + Valid: true, + Conditions: cond, + } +} + +func createNginxProxy(name, namespace string, spec ngfAPIv1alpha2.NginxProxySpec) *ngfAPIv1alpha2.NginxProxy { + return &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: spec, + } +} + +func convertedGateway( + gw *gatewayv1.Gateway, + nginxProxy *NginxProxy, + effectiveNp *EffectiveNginxProxy, + listeners []*Listener, + conds []conditions.Condition, +) *Gateway { + return &Gateway{ + Source: gw, + Valid: true, + NginxProxy: nginxProxy, + EffectiveNginxProxy: effectiveNp, + Listeners: listeners, + Conditions: conds, + DeploymentName: types.NamespacedName{ + Name: gw.Name + "-" + gcName, + Namespace: gw.Namespace, + }, + } +} + +func createListener( + name, hostname string, + port int32, + protocol gatewayv1.ProtocolType, + tlsConfig *gatewayv1.GatewayTLSConfig, + allowedRoutes *gatewayv1.AllowedRoutes, +) gatewayv1.Listener { + listener := gatewayv1.Listener{ + Name: gatewayv1.SectionName(name), + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer(hostname)), + Port: gatewayv1.PortNumber(port), + Protocol: protocol, + AllowedRoutes: allowedRoutes, + } + + if tlsConfig != nil { + listener.TLS = tlsConfig + } + + return listener +} + +func convertListener( + listener gatewayv1.Listener, + gatewayNSName 
types.NamespacedName, + secret *v1.Secret, + supportedKinds []gatewayv1.RouteGroupKind, + l7Route map[RouteKey]*L7Route, + l4Route map[L4RouteKey]*L4Route, +) *Listener { + l := &Listener{ + Name: string(listener.Name), + GatewayName: gatewayNSName, + Source: listener, + L4Routes: l4Route, + Routes: l7Route, + Valid: true, + SupportedKinds: supportedKinds, + Attachable: true, + } + + if secret != nil { + l.ResolvedSecret = helpers.GetPointer(client.ObjectKeyFromObject(secret)) + } + return l +} + +// Test_MultipleGateways_WithNginxProxy tests how nginx proxy config is inherited or overwritten +// when multiple gateways are present in the cluster. +func Test_MultipleGateways_WithNginxProxy(t *testing.T) { + nginxProxyGlobal := createNginxProxy("nginx-proxy", testNs, ngfAPIv1alpha2.NginxProxySpec{ + DisableHTTP2: helpers.GetPointer(true), + }) + + nginxProxyGateway1 := createNginxProxy("nginx-proxy-gateway-1", testNs, ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + }) + + nginxProxyGateway3 := createNginxProxy("nginx-proxy-gateway-3", "test2", ngfAPIv1alpha2.NginxProxySpec{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + DisableHTTP2: helpers.GetPointer(false), + }) + + gatewayClass := createGatewayClass(gcName, controllerName, "nginx-proxy", testNs) + gateway1 := createGateway("gateway-1", testNs, "", []gatewayv1.Listener{}) + gateway2 := createGateway("gateway-2", testNs, "", []gatewayv1.Listener{}) + gateway3 := createGateway("gateway-3", "test2", "", []gatewayv1.Listener{}) + + gateway1withNP := createGateway("gateway-1", testNs, "nginx-proxy-gateway-1", []gatewayv1.Listener{}) + gateway3withNP := createGateway("gateway-3", "test2", "nginx-proxy-gateway-3", []gatewayv1.Listener{}) + + gcConditions := 
[]conditions.Condition{staticConds.NewGatewayClassResolvedRefs()} + + tests := []struct { + clusterState ClusterState + expGraph *Graph + name string + }{ + { + name: "gateway class with nginx proxy, multiple gateways inheriting settings from global nginx proxy", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gateway1): gateway1, + client.ObjectKeyFromObject(gateway2): gateway2, + client.ObjectKeyFromObject(gateway3): gateway3, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, gcConditions...), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1): convertedGateway( + gateway1, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway3): convertedGateway( + gateway3, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + }, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): { + Source: nginxProxyGlobal, + Valid: true, + }, + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + }, + }, + { + name: "gateway class with nginx proxy, multiple gateways with their own referenced nginx proxy", + clusterState: ClusterState{ + GatewayClasses: 
map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gateway1withNP): gateway1withNP, + client.ObjectKeyFromObject(gateway2): gateway2, + client.ObjectKeyFromObject(gateway3withNP): gateway3withNP, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + client.ObjectKeyFromObject(nginxProxyGateway1): nginxProxyGateway1, + client.ObjectKeyFromObject(nginxProxyGateway3): nginxProxyGateway3, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, gcConditions...), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1withNP): convertedGateway( + gateway1withNP, + &NginxProxy{Source: nginxProxyGateway1, Valid: true}, + &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + DisableHTTP2: helpers.GetPointer(true), + }, + []*Listener{}, + gcConditions, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway3withNP): convertedGateway( + gateway3withNP, + &NginxProxy{Source: nginxProxyGateway3, Valid: true}, + &EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + DisableHTTP2: helpers.GetPointer(false), + }, + []*Listener{}, + gcConditions, + ), + }, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + 
client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + client.ObjectKeyFromObject(nginxProxyGateway1): {Source: nginxProxyGateway1, Valid: true}, + client.ObjectKeyFromObject(nginxProxyGateway3): {Source: nginxProxyGateway3, Valid: true}, + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + format.MaxLength = 10000000 + + fakePolicyValidator := &validationfakes.FakePolicyValidator{} + + result := BuildGraph( + test.clusterState, + controllerName, + gcName, + map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + FieldName: "license.jwt", + }, + }, + }, + validation.Validators{ + HTTPFieldsValidator: &validationfakes.FakeHTTPFieldsValidator{}, + GenericValidator: &validationfakes.FakeGenericValidator{}, + PolicyValidator: fakePolicyValidator, + }, + ) + + g.Expect(helpers.Diff(test.expGraph, result)).To(BeEmpty()) + }) + } +} + +// Test_MultipleGateways_WithListeners tests how listeners attach and interact with multiple gateways. 
+func Test_MultipleGateways_WithListeners(t *testing.T) { + nginxProxyGlobal := createNginxProxy("nginx-proxy", testNs, ngfAPIv1alpha2.NginxProxySpec{ + DisableHTTP2: helpers.GetPointer(true), + }) + gatewayClass := createGatewayClass(gcName, controllerName, "nginx-proxy", testNs) + + secretDiffNs := &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "secret-ns", + Name: "secret", + }, + Data: map[string][]byte{ + v1.TLSCertKey: cert, + v1.TLSPrivateKeyKey: key, + }, + Type: v1.SecretTypeTLS, + } + + rgSecretsToGateway := &v1beta1.ReferenceGrant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rg-secret-to-gateway", + Namespace: "secret-ns", + }, + Spec: v1beta1.ReferenceGrantSpec{ + From: []v1beta1.ReferenceGrantFrom{ + { + Group: gatewayv1.GroupName, + Kind: kinds.Gateway, + Namespace: gatewayv1.Namespace(testNs), + }, + }, + To: []v1beta1.ReferenceGrantTo{ + { + Group: "core", + Kind: "Secret", + Name: helpers.GetPointer[gatewayv1.ObjectName]("secret"), + }, + }, + }, + } + + tlsConfigDiffNsSecret := &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secretDiffNs.Name), + Namespace: helpers.GetPointer(gatewayv1.Namespace(secretDiffNs.Namespace)), + }, + }, + } + + gateway1 := createGateway("gateway-1", testNs, "nginx-proxy", []gatewayv1.Listener{ + createListener( + "listener-tls-mode-terminate", + "*.example.com", + 443, + gatewayv1.HTTPSProtocolType, + tlsConfigDiffNsSecret, + allowedRoutesHTTPGRPC, + ), + }) + gateway2 := createGateway("gateway-2", testNs, "nginx-proxy", []gatewayv1.Listener{ + createListener( + "listener-tls-mode-terminate", + "*.example.com", + 443, + gatewayv1.HTTPSProtocolType, + tlsConfigDiffNsSecret, + allowedRoutesHTTPGRPC, + ), + }) + + tlsConfigPassthrough := &gatewayv1.GatewayTLSConfig{ + Mode: 
helpers.GetPointer(gatewayv1.TLSModePassthrough), + } + + secretSameNs := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "secret", + }, + Data: map[string][]byte{ + v1.TLSCertKey: cert, + v1.TLSPrivateKeyKey: key, + }, + Type: v1.SecretTypeTLS, + } + + gatewayTLSConfigSameNs := &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secretSameNs.Name), + Namespace: (*gatewayv1.Namespace)(&secretSameNs.Namespace), + }, + }, + } + + // valid http, https and tls listeners + listeners := []gatewayv1.Listener{ + createListener( + "foo-listener-http", + "foo.example.com", + 80, + gatewayv1.HTTPProtocolType, + nil, + allowedRoutesHTTPGRPC, + ), + createListener( + "foo-listener-https", + "tea.example.com", + 443, + gatewayv1.HTTPSProtocolType, + gatewayTLSConfigSameNs, + allowedRoutesHTTPGRPC, + ), + createListener( + "listener-tls-mode-passthrough", + "cafe.example.com", + 8443, + gatewayv1.TLSProtocolType, + tlsConfigPassthrough, + allowedRoutesTLS, + ), + } + gatewayMultipleListeners1 := createGateway("gateway-multiple-listeners-1", testNs, "nginx-proxy", listeners) + gatewayMultipleListeners2 := createGateway("gateway-multiple-listeners-2", testNs, "nginx-proxy", listeners) + gatewayMultipleListeners3 := createGateway("gateway-multiple-listeners-3", testNs, "nginx-proxy", listeners) + + // valid TLS and https listener same port and hostname + gatewayTLSSamePortHostname := createGateway( + "gateway-tls-foo", + testNs, + "nginx-proxy", + []gatewayv1.Listener{ + createListener( + "foo-listener-tls", + "foo.example.com", + 443, + gatewayv1.TLSProtocolType, + tlsConfigPassthrough, + allowedRoutesTLS, + ), + }, + ) + + gatewayHTTPSSamePortHostname := createGateway( + "gateway-http-foo", + testNs, + "nginx-proxy", + []gatewayv1.Listener{ + createListener( + "foo-listener-tls", + 
"foo.example.com", + 443, + gatewayv1.HTTPSProtocolType, + gatewayTLSConfigSameNs, + allowedRoutesHTTPGRPC, + ), + }, + ) + + tests := []struct { + clusterState ClusterState + expGraph *Graph + name string + }{ + { + name: "multiple gateways with tls listeners, have reference grants to access the secret", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretDiffNs): secretDiffNs, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gateway1): gateway1, + client.ObjectKeyFromObject(gateway2): gateway2, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + ReferenceGrants: map[types.NamespacedName]*v1beta1.ReferenceGrant{ + client.ObjectKeyFromObject(rgSecretsToGateway): rgSecretsToGateway, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1): convertedGateway( + gateway1, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gateway1.Spec.Listeners[0], + client.ObjectKeyFromObject(gateway1), + secretDiffNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gateway2.Spec.Listeners[0], + 
client.ObjectKeyFromObject(gateway2), + secretDiffNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretDiffNs): { + Source: secretDiffNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretDiffNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, + }, + }, + }, + { + name: "valid http, https and tls listeners across multiple gateways with same port references," + + "leads to no port conflict", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gatewayMultipleListeners1): gatewayMultipleListeners1, + client.ObjectKeyFromObject(gatewayMultipleListeners2): gatewayMultipleListeners2, + client.ObjectKeyFromObject(gatewayMultipleListeners3): gatewayMultipleListeners3, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretSameNs): secretSameNs, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gatewayMultipleListeners1): convertedGateway( + gatewayMultipleListeners1, + 
&NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners1.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners1.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners1.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayMultipleListeners2): convertedGateway( + gatewayMultipleListeners2, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners2.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners2.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners2.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayMultipleListeners3): convertedGateway( + gatewayMultipleListeners3, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + 
&EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners3.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners3.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners3.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretSameNs): { + Source: secretSameNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretSameNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, + }, + }, + }, + { + name: "valid tls and https listeners across multiple gateways with same port and hostname causes no conflict", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gatewayTLSSamePortHostname): gatewayTLSSamePortHostname, + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname): gatewayHTTPSSamePortHostname, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): 
nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretSameNs): secretSameNs, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gatewayTLSSamePortHostname): convertedGateway( + gatewayTLSSamePortHostname, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayTLSSamePortHostname.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayTLSSamePortHostname), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname): convertedGateway( + gatewayHTTPSSamePortHostname, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayHTTPSSamePortHostname.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretSameNs): { + Source: secretSameNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretSameNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, 
+ }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + format.MaxLength = 10000000 + + fakePolicyValidator := &validationfakes.FakePolicyValidator{} + + result := BuildGraph( + test.clusterState, + controllerName, + gcName, + map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + FieldName: "license.jwt", + }, + }, + }, + validation.Validators{ + HTTPFieldsValidator: &validationfakes.FakeHTTPFieldsValidator{}, + GenericValidator: &validationfakes.FakeGenericValidator{}, + PolicyValidator: fakePolicyValidator, + }, + ) + + g.Expect(helpers.Diff(test.expGraph, result)).To(BeEmpty()) + }) + } +} diff --git a/internal/mode/static/state/graph/namespace.go b/internal/mode/static/state/graph/namespace.go index 481e4d749b..8cbda90f7e 100644 --- a/internal/mode/static/state/graph/namespace.go +++ b/internal/mode/static/state/graph/namespace.go @@ -10,12 +10,12 @@ import ( // a label that matches any of the Gateway Listener's label selector. func buildReferencedNamespaces( clusterNamespaces map[types.NamespacedName]*v1.Namespace, - gw *Gateway, + gateways map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*v1.Namespace { referencedNamespaces := make(map[types.NamespacedName]*v1.Namespace) for name, ns := range clusterNamespaces { - if isNamespaceReferenced(ns, gw) { + if isNamespaceReferenced(ns, gateways) { referencedNamespaces[name] = ns } } @@ -28,19 +28,21 @@ func buildReferencedNamespaces( // isNamespaceReferenced returns true if a given Namespace resource has a label // that matches any of the Gateway Listener's label selector. 
-func isNamespaceReferenced(ns *v1.Namespace, gw *Gateway) bool { - if gw == nil || ns == nil { +func isNamespaceReferenced(ns *v1.Namespace, gws map[types.NamespacedName]*Gateway) bool { + if ns == nil || len(gws) == 0 { return false } nsLabels := labels.Set(ns.GetLabels()) - for _, listener := range gw.Listeners { - if listener.AllowedRouteLabelSelector == nil { - // Can have listeners with AllowedRouteLabelSelector not set. - continue - } - if listener.AllowedRouteLabelSelector.Matches(nsLabels) { - return true + for _, gw := range gws { + for _, listener := range gw.Listeners { + if listener.AllowedRouteLabelSelector == nil { + // Can have listeners with AllowedRouteLabelSelector not set. + continue + } + if listener.AllowedRouteLabelSelector.Matches(nsLabels) { + return true + } } } diff --git a/internal/mode/static/state/graph/namespace_test.go b/internal/mode/static/state/graph/namespace_test.go index 372fd3d12d..af2e2ecc2b 100644 --- a/internal/mode/static/state/graph/namespace_test.go +++ b/internal/mode/static/state/graph/namespace_test.go @@ -4,9 +4,8 @@ import ( "testing" . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" ) @@ -44,20 +43,22 @@ func TestBuildReferencedNamespaces(t *testing.T) { } tests := []struct { - gw *Gateway + gws map[types.NamespacedName]*Gateway expectedRefNS map[types.NamespacedName]*v1.Namespace name string }{ { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -65,20 +66,22 @@ func TestBuildReferencedNamespaces(t *testing.T) { name: "gateway matches labels with one namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"peaches": "bananas"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"peaches": "bananas"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -87,60 +90,67 @@ func TestBuildReferencedNamespaces(t *testing.T) { name: "gateway matches labels with two namespaces", }, { - gw: &Gateway{ - 
Listeners: []*Listener{}, - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{}, + Valid: true, + }, }, expectedRefNS: nil, name: "gateway has no Listeners", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - }, - { - Name: "listener-2", - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + }, + { + Name: "listener-2", + Valid: true, + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: nil, name: "gateway has multiple listeners with no AllowedRouteLabelSelector set", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + }, }, + Valid: true, }, - Valid: true, }, - expectedRefNS: nil, name: "gateway doesn't match labels with any namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -148,19 +158,21 @@ func TestBuildReferencedNamespaces(t *testing.T) 
{ name: "gateway has two listeners and only matches labels with one namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -173,7 +185,7 @@ func TestBuildReferencedNamespaces(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildReferencedNamespaces(clusterNamespaces, test.gw)).To(Equal(test.expectedRefNS)) + g.Expect(buildReferencedNamespaces(clusterNamespaces, test.gws)).To(Equal(test.expectedRefNS)) }) } } @@ -182,13 +194,13 @@ func TestIsNamespaceReferenced(t *testing.T) { t.Parallel() tests := []struct { ns *v1.Namespace - gw *Gateway + gws map[types.NamespacedName]*Gateway name string exp bool }{ { ns: nil, - gw: nil, + gws: nil, exp: false, name: "namespace and gateway are nil", }, @@ -198,15 +210,17 @@ func TestIsNamespaceReferenced(t *testing.T) { Name: "ns1", }, }, - gw: nil, + gws: nil, exp: false, name: "namespace is valid but gateway is nil", }, { ns: nil, - gw: &Gateway{ - Listeners: []*Listener{}, - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {Name: "ns1"}: { + Listeners: []*Listener{}, + Valid: true, + }, }, exp: false, name: "gateway is valid but namespace is nil", @@ -218,7 +232,7 @@ func TestIsNamespaceReferenced(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(isNamespaceReferenced(test.ns, test.gw)).To(Equal(test.exp)) + g.Expect(isNamespaceReferenced(test.ns, test.gws)).To(Equal(test.exp)) 
}) } } diff --git a/internal/mode/static/state/graph/nginxproxy.go b/internal/mode/static/state/graph/nginxproxy.go index 831a205120..3b72161233 100644 --- a/internal/mode/static/state/graph/nginxproxy.go +++ b/internal/mode/static/state/graph/nginxproxy.go @@ -1,15 +1,18 @@ package graph import ( + "encoding/json" + "fmt" "slices" "k8s.io/apimachinery/pkg/types" k8svalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" ) @@ -17,45 +20,185 @@ import ( // NginxProxy represents the NginxProxy resource. type NginxProxy struct { // Source is the source resource. - Source *ngfAPI.NginxProxy + Source *ngfAPIv1alpha2.NginxProxy // ErrMsgs contains the validation errors if they exist, to be included in the GatewayClass condition. ErrMsgs field.ErrorList // Valid shows whether the NginxProxy is valid. Valid bool } -// buildNginxProxy validates and returns the NginxProxy associated with the GatewayClass (if it exists). -func buildNginxProxy( - nps map[types.NamespacedName]*ngfAPI.NginxProxy, - gc *v1.GatewayClass, +// EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on the +// GatewayClass resource. This is the effective set of config that should be applied to the Gateway. +type EffectiveNginxProxy ngfAPIv1alpha2.NginxProxySpec + +// buildEffectiveNginxProxy builds the effective NginxProxy for the Gateway by merging the GatewayClass and Gateway +// NginxProxy resources. 
Fields specified on the Gateway NginxProxy override those set on the GatewayClass NginxProxy. +func buildEffectiveNginxProxy(gatewayClassNp, gatewayNp *NginxProxy) *EffectiveNginxProxy { + gcNpValid, gwNpValid := nginxProxyValid(gatewayClassNp), nginxProxyValid(gatewayNp) + if !gcNpValid && !gwNpValid { + return nil + } + + if !gcNpValid { + enp := EffectiveNginxProxy(*gatewayNp.Source.Spec.DeepCopy()) + return &enp + } + + if !gwNpValid { + enp := EffectiveNginxProxy(*gatewayClassNp.Source.Spec.DeepCopy()) + return &enp + } + + global := EffectiveNginxProxy(*gatewayClassNp.Source.Spec.DeepCopy()) + local := EffectiveNginxProxy(*gatewayNp.Source.Spec.DeepCopy()) + + // by marshaling the local config and then unmarshaling on top of the global config, + // we ensure that any unset local values are set with the global values + localBytes, err := json.Marshal(local) + if err != nil { + panic( + fmt.Sprintf( + "could not marshal NginxProxy resource referenced by Gateway %s", + client.ObjectKeyFromObject(gatewayNp.Source), + ), + ) + } + + err = json.Unmarshal(localBytes, &global) + if err != nil { + panic( + fmt.Sprintf( + "could not unmarshal NginxProxy resource referenced by GatewayClass %s", + client.ObjectKeyFromObject(gatewayClassNp.Source), + ), + ) + } + + // this json trick doesn't work for unsetting slices, so we need to do that manually. 
+ if local.Telemetry != nil { + if local.Telemetry.DisabledFeatures != nil && len(local.Telemetry.DisabledFeatures) == 0 { + global.Telemetry.DisabledFeatures = []ngfAPIv1alpha2.DisableTelemetryFeature{} + } + + if local.Telemetry.SpanAttributes != nil && len(local.Telemetry.SpanAttributes) == 0 { + global.Telemetry.SpanAttributes = []ngfAPIv1alpha1.SpanAttribute{} + } + } + + if local.RewriteClientIP != nil { + if local.RewriteClientIP.TrustedAddresses != nil && len(local.RewriteClientIP.TrustedAddresses) == 0 { + global.RewriteClientIP.TrustedAddresses = []ngfAPIv1alpha2.RewriteClientIPAddress{} + } + } + + return &global +} + +func nginxProxyValid(np *NginxProxy) bool { + return np != nil && np.Source != nil && np.Valid +} + +func telemetryEnabledForNginxProxy(np *EffectiveNginxProxy) bool { + if np.Telemetry == nil || np.Telemetry.Exporter == nil || np.Telemetry.Exporter.Endpoint == nil { + return false + } + + if slices.Contains(np.Telemetry.DisabledFeatures, ngfAPIv1alpha2.DisableTracing) { + return false + } + + return true +} + +// MetricsEnabledForNginxProxy returns whether metrics is enabled, and the associated port if specified. +// By default, metrics are enabled. 
+func MetricsEnabledForNginxProxy(np *EffectiveNginxProxy) (*int32, bool) { + if np != nil && np.Metrics != nil { + if np.Metrics.Disable != nil && *np.Metrics.Disable { + return nil, false + } + return np.Metrics.Port, true + } + + return nil, true +} + +func processNginxProxies( + nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy, validator validation.GenericValidator, -) *NginxProxy { + gc *v1.GatewayClass, + gws map[types.NamespacedName]*v1.Gateway, +) map[types.NamespacedName]*NginxProxy { + referencedNginxProxies := make(map[types.NamespacedName]*NginxProxy) + if gcReferencesAnyNginxProxy(gc) { - npCfg := nps[types.NamespacedName{Name: gc.Spec.ParametersRef.Name}] - if npCfg != nil { - errs := validateNginxProxy(validator, npCfg) - - return &NginxProxy{ - Source: npCfg, - Valid: len(errs) == 0, - ErrMsgs: errs, + // we will ignore references without namespaces + // the gateway class status will contain an error message about the missing namespace + if gc.Spec.ParametersRef.Namespace != nil { + refNp := types.NamespacedName{ + Name: gc.Spec.ParametersRef.Name, + Namespace: string(*gc.Spec.ParametersRef.Namespace), + } + + if np, ok := nps[refNp]; ok { + referencedNginxProxies[refNp] = buildNginxProxy(np, validator) } } } - return nil + for _, gw := range gws { + if gwReferencesAnyNginxProxy(gw) { + refNp := types.NamespacedName{ + Name: gw.Spec.Infrastructure.ParametersRef.Name, + Namespace: gw.Namespace, + } + if np, ok := nps[refNp]; ok { + referencedNginxProxies[refNp] = buildNginxProxy(np, validator) + } else { + referencedNginxProxies[refNp] = nil + } + } + } + + if len(referencedNginxProxies) == 0 { + return nil + } + + return referencedNginxProxies } -// isNginxProxyReferenced returns whether or not a specific NginxProxy is referenced in the GatewayClass. 
-func isNginxProxyReferenced(npNSName types.NamespacedName, gc *GatewayClass) bool { - return gc != nil && gcReferencesAnyNginxProxy(gc.Source) && gc.Source.Spec.ParametersRef.Name == npNSName.Name +// buildNginxProxy validates and returns the NginxProxy associated with the GatewayClass (if it exists). +func buildNginxProxy( + np *ngfAPIv1alpha2.NginxProxy, + validator validation.GenericValidator, +) *NginxProxy { + if np != nil { + errs := validateNginxProxy(validator, np) + + return &NginxProxy{ + Source: np, + Valid: len(errs) == 0, + ErrMsgs: errs, + } + } + + return nil } // gcReferencesNginxProxy returns whether a GatewayClass references any NginxProxy resource. func gcReferencesAnyNginxProxy(gc *v1.GatewayClass) bool { if gc != nil { ref := gc.Spec.ParametersRef - return ref != nil && ref.Group == ngfAPI.GroupName && ref.Kind == v1.Kind(kinds.NginxProxy) + return ref != nil && ref.Group == ngfAPIv1alpha2.GroupName && ref.Kind == kinds.NginxProxy + } + + return false +} + +func gwReferencesAnyNginxProxy(gw *v1.Gateway) bool { + if gw != nil && gw.Spec.Infrastructure != nil { + ref := gw.Spec.Infrastructure.ParametersRef + return ref != nil && ref.Group == ngfAPIv1alpha2.GroupName && ref.Kind == kinds.NginxProxy } return false @@ -64,7 +207,7 @@ func gcReferencesAnyNginxProxy(gc *v1.GatewayClass) bool { // validateNginxProxy performs re-validation on string values in the case of CRD validation failure. 
func validateNginxProxy( validator validation.GenericValidator, - npCfg *ngfAPI.NginxProxy, + npCfg *ngfAPIv1alpha2.NginxProxy, ) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -85,8 +228,8 @@ func validateNginxProxy( exp := telemetry.Exporter expPath := telPath.Child("exporter") - if exp.Endpoint != "" { - if err := validator.ValidateEndpoint(exp.Endpoint); err != nil { + if exp.Endpoint != nil { + if err := validator.ValidateEndpoint(*exp.Endpoint); err != nil { allErrs = append(allErrs, field.Invalid(expPath.Child("endpoint"), exp.Endpoint, err.Error())) } } @@ -116,17 +259,15 @@ func validateNginxProxy( ipFamily := npCfg.Spec.IPFamily ipFamilyPath := spec.Child("ipFamily") switch *ipFamily { - case ngfAPI.Dual, ngfAPI.IPv4, ngfAPI.IPv6: + case ngfAPIv1alpha2.Dual, ngfAPIv1alpha2.IPv4, ngfAPIv1alpha2.IPv6: default: allErrs = append( allErrs, field.NotSupported( ipFamilyPath, ipFamily, - []string{string(ngfAPI.Dual), string(ngfAPI.IPv4), string(ngfAPI.IPv6)})) + []string{string(ngfAPIv1alpha2.Dual), string(ngfAPIv1alpha2.IPv4), string(ngfAPIv1alpha2.IPv6)})) } - } else { - npCfg.Spec.IPFamily = helpers.GetPointer[ngfAPI.IPFamilyType](ngfAPI.Dual) } allErrs = append(allErrs, validateLogging(npCfg)...) 
@@ -138,7 +279,7 @@ func validateNginxProxy( return allErrs } -func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateLogging(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -150,14 +291,14 @@ func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { errLevel := string(*logging.ErrorLevel) validLogLevels := []string{ - string(ngfAPI.NginxLogLevelDebug), - string(ngfAPI.NginxLogLevelInfo), - string(ngfAPI.NginxLogLevelNotice), - string(ngfAPI.NginxLogLevelWarn), - string(ngfAPI.NginxLogLevelError), - string(ngfAPI.NginxLogLevelCrit), - string(ngfAPI.NginxLogLevelAlert), - string(ngfAPI.NginxLogLevelEmerg), + string(ngfAPIv1alpha2.NginxLogLevelDebug), + string(ngfAPIv1alpha2.NginxLogLevelInfo), + string(ngfAPIv1alpha2.NginxLogLevelNotice), + string(ngfAPIv1alpha2.NginxLogLevelWarn), + string(ngfAPIv1alpha2.NginxLogLevelError), + string(ngfAPIv1alpha2.NginxLogLevelCrit), + string(ngfAPIv1alpha2.NginxLogLevelAlert), + string(ngfAPIv1alpha2.NginxLogLevelEmerg), } if !slices.Contains(validLogLevels, errLevel) { @@ -175,7 +316,7 @@ func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { return allErrs } -func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateRewriteClientIP(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -194,14 +335,17 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { } switch mode { - case ngfAPI.RewriteClientIPModeProxyProtocol, ngfAPI.RewriteClientIPModeXForwardedFor: + case ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol, ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor: default: allErrs = append( allErrs, field.NotSupported( rewriteClientIPPath.Child("mode"), mode, - []string{string(ngfAPI.RewriteClientIPModeProxyProtocol), string(ngfAPI.RewriteClientIPModeXForwardedFor)}, + []string{ + 
string(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + string(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + }, ), ) } @@ -218,15 +362,15 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { valuePath := trustedAddressesPath.Child("value") switch addr.Type { - case ngfAPI.RewriteClientIPCIDRAddressType: + case ngfAPIv1alpha2.RewriteClientIPCIDRAddressType: if err := k8svalidation.IsValidCIDR(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } - case ngfAPI.RewriteClientIPIPAddressType: + case ngfAPIv1alpha2.RewriteClientIPIPAddressType: if err := k8svalidation.IsValidIP(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } - case ngfAPI.RewriteClientIPHostnameAddressType: + case ngfAPIv1alpha2.RewriteClientIPHostnameAddressType: if errs := k8svalidation.IsDNS1123Subdomain(addr.Value); len(errs) > 0 { for _, e := range errs { allErrs = append(allErrs, field.Invalid(valuePath, addr.Value, e)) @@ -238,9 +382,9 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { field.NotSupported(trustedAddressesPath.Child("type"), addr.Type, []string{ - string(ngfAPI.RewriteClientIPCIDRAddressType), - string(ngfAPI.RewriteClientIPIPAddressType), - string(ngfAPI.RewriteClientIPHostnameAddressType), + string(ngfAPIv1alpha2.RewriteClientIPCIDRAddressType), + string(ngfAPIv1alpha2.RewriteClientIPIPAddressType), + string(ngfAPIv1alpha2.RewriteClientIPHostnameAddressType), }, ), ) @@ -251,7 +395,7 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { return allErrs } -func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateNginxPlus(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -264,11 +408,11 @@ func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { valuePath := nginxPlusPath.Child("value") switch addr.Type { - case ngfAPI.NginxPlusAllowCIDRAddressType: + case 
ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType: if err := k8svalidation.IsValidCIDR(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } - case ngfAPI.NginxPlusAllowIPAddressType: + case ngfAPIv1alpha2.NginxPlusAllowIPAddressType: if err := k8svalidation.IsValidIP(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } @@ -278,8 +422,8 @@ func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { field.NotSupported(nginxPlusPath.Child("type"), addr.Type, []string{ - string(ngfAPI.NginxPlusAllowCIDRAddressType), - string(ngfAPI.NginxPlusAllowIPAddressType), + string(ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType), + string(ngfAPIv1alpha2.NginxPlusAllowIPAddressType), }, ), ) diff --git a/internal/mode/static/state/graph/nginxproxy_test.go b/internal/mode/static/state/graph/nginxproxy_test.go index 325a996321..4e21c7283d 100644 --- a/internal/mode/static/state/graph/nginxproxy_test.go +++ b/internal/mode/static/state/graph/nginxproxy_test.go @@ -7,78 +7,368 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation/validationfakes" ) -func TestGetNginxProxy(t *testing.T) { +func createValidValidator() *validationfakes.FakeGenericValidator { + v := &validationfakes.FakeGenericValidator{} + v.ValidateEscapedStringNoVarExpansionReturns(nil) + v.ValidateEndpointReturns(nil) + v.ValidateServiceNameReturns(nil) + v.ValidateNginxDurationReturns(nil) + + return v +} + +func createInvalidValidator() *validationfakes.FakeGenericValidator { + v := &validationfakes.FakeGenericValidator{} + v.ValidateEscapedStringNoVarExpansionReturns(errors.New("error")) + v.ValidateEndpointReturns(errors.New("error")) + v.ValidateServiceNameReturns(errors.New("error")) + v.ValidateNginxDurationReturns(errors.New("error")) + + return v +} + +func TestBuildEffectiveNginxProxy(t *testing.T) { t.Parallel() + + newTestNginxProxy := func( + ipFam ngfAPIv1alpha2.IPFamilyType, + disableFeats []ngfAPIv1alpha2.DisableTelemetryFeature, + interval ngfAPIv1alpha1.Duration, + batchSize int32, + batchCount int32, + endpoint string, + serviceName string, + spanAttr ngfAPIv1alpha1.SpanAttribute, + mode ngfAPIv1alpha2.RewriteClientIPModeType, + trustedAddr []ngfAPIv1alpha2.RewriteClientIPAddress, + logLevel ngfAPIv1alpha2.NginxErrorLogLevel, + setIP bool, + disableHTTP bool, + nginxDebug bool, + ) *ngfAPIv1alpha2.NginxProxy { + return 
&ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: &ipFam, + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: disableFeats, + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: &interval, + BatchSize: &batchSize, + BatchCount: &batchCount, + Endpoint: &endpoint, + }, + ServiceName: &serviceName, + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{spanAttr}, + }, + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: &mode, + SetIPRecursively: &setIP, + TrustedAddresses: trustedAddr, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: &logLevel, + }, + DisableHTTP2: &disableHTTP, + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Container: ngfAPIv1alpha2.ContainerSpec{ + Debug: &nginxDebug, + }, + }, + }, + }, + } + } + + getNginxProxy := func() *ngfAPIv1alpha2.NginxProxy { + return newTestNginxProxy( + ngfAPIv1alpha2.Dual, + []ngfAPIv1alpha2.DisableTelemetryFeature{ngfAPIv1alpha2.DisableTracing}, + "10s", + 10, + 5, + "endpoint:1234", + "my-service", + ngfAPIv1alpha1.SpanAttribute{Key: "key", Value: "val"}, + ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor, + []ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1"}, + }, + ngfAPIv1alpha2.NginxLogLevelAlert, + true, + false, + false, + ) + } + + getNginxProxyAllFieldsSetDifferently := func() *ngfAPIv1alpha2.NginxProxy { + return newTestNginxProxy( + ngfAPIv1alpha2.IPv6, + []ngfAPIv1alpha2.DisableTelemetryFeature{}, + "5s", + 8, + 2, + "diff-endpoint:1234", + "diff-service", + ngfAPIv1alpha1.SpanAttribute{Key: "diff-key", Value: "diff-val"}, + ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor, + []ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/24"}, + }, + ngfAPIv1alpha2.NginxLogLevelError, + false, + true, + true, + ) + } + + getExpSpec := func() *EffectiveNginxProxy { + enp := 
EffectiveNginxProxy(getNginxProxy().Spec) + return &enp + } + + getModifiedExpSpec := func(mod func(*ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy) *EffectiveNginxProxy { + enp := EffectiveNginxProxy(mod(getNginxProxy()).Spec) + return &enp + } + tests := []struct { - nps map[types.NamespacedName]*ngfAPI.NginxProxy - gc *v1.GatewayClass - expNP *NginxProxy - name string + gcNp *NginxProxy + gwNp *NginxProxy + exp *EffectiveNginxProxy + name string }{ { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{ - {Name: "np1"}: {}, + name: "both gateway class and gateway nginx proxies are nil", + gcNp: nil, + gwNp: nil, + exp: nil, + }, + { + name: "nil gateway class nginx proxy", + gcNp: nil, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway class nginx proxy; invalid gateway nginx proxy", + gcNp: nil, + gwNp: &NginxProxy{Valid: false, Source: getNginxProxy()}, + exp: nil, + }, + { + name: "nil gateway class nginx proxy; nil gateway nginx proxy source", + gcNp: nil, + gwNp: &NginxProxy{Valid: true, Source: nil}, + exp: nil, + }, + { + name: "invalid gateway class nginx proxy", + gcNp: &NginxProxy{Valid: false}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway class nginx proxy source", + gcNp: &NginxProxy{Valid: true, Source: nil}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway nginx proxy", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: nil, + exp: getExpSpec(), + }, + { + name: "invalid gateway nginx proxy", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: false}, + exp: getExpSpec(), + }, + { + name: "nil gateway nginx proxy source", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: true, Source: nil}, + exp: getExpSpec(), + }, + { + name: "both have all fields set; gateway values should 
win", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxyAllFieldsSetDifferently()}, + exp: getModifiedExpSpec(func(_ *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + return getNginxProxyAllFieldsSetDifferently() + }), + }, + { + name: "gateway nginx proxy overrides nginx error log level", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + }, + }, + }, }, - gc: nil, - expNP: nil, - name: "nil gatewayclass", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.Logging.ErrorLevel = helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug) + return np + }), }, { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{}, - gc: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "np1", + name: "gateway nginx proxy overrides select telemetry values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("new-service-name"), + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + BatchSize: helpers.GetPointer[int32](20), + Endpoint: helpers.GetPointer("new-endpoint"), + }, + }, }, }, }, - expNP: nil, - name: "no nginxproxy resources", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.Telemetry.ServiceName = helpers.GetPointer("new-service-name") + np.Spec.Telemetry.Exporter.Endpoint = helpers.GetPointer("new-endpoint") + np.Spec.Telemetry.Exporter.BatchSize = helpers.GetPointer[int32](20) + return 
np + }), }, { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{ - {Name: "np1"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "np1", + name: "gateway nginx proxy overrides select rewrite client IP values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + SetIPRecursively: helpers.GetPointer(false), + }, }, }, - {Name: "np2"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "np2", + }, + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.RewriteClientIP.Mode = helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol) + np.Spec.RewriteClientIP.SetIPRecursively = helpers.GetPointer(false) + return np + }), + }, + { + name: "gateway nginx proxy unsets slices values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{}, + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{}, + }, + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{}, + }, }, }, }, - gc: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "np2", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.RewriteClientIP.TrustedAddresses = []ngfAPIv1alpha2.RewriteClientIPAddress{} + np.Spec.Telemetry.DisabledFeatures = []ngfAPIv1alpha2.DisableTelemetryFeature{} + np.Spec.Telemetry.SpanAttributes = []ngfAPIv1alpha1.SpanAttribute{} + return np + }), + }, + } + + for _, test := range 
tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + enp := buildEffectiveNginxProxy(test.gcNp, test.gwNp) + g.Expect(enp).To(Equal(test.exp)) + }) + } +} + +func TestTelemetryEnabledForNginxProxy(t *testing.T) { + t.Parallel() + + tests := []struct { + ep *EffectiveNginxProxy + name string + enabled bool + }{ + { + name: "telemetry struct is nil", + ep: &EffectiveNginxProxy{ + Telemetry: nil, + }, + enabled: false, + }, + { + name: "telemetry exporter is nil", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: nil, + }, + }, + enabled: false, + }, + { + name: "tracing is disabled", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ + ngfAPIv1alpha2.DisableTracing, + }, + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("new-endpoint"), }, }, }, - expNP: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np2", + enabled: false, + }, + { + name: "exporter endpoint is nil", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: nil, }, - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.Dual), + }, + }, + enabled: false, + }, + { + name: "normal case; enabled", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("new-endpoint"), }, }, - Valid: true, }, - name: "returns correct resource", + enabled: true, }, } @@ -87,64 +377,225 @@ func TestGetNginxProxy(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildNginxProxy(test.nps, test.gc, &validationfakes.FakeGenericValidator{})).To(Equal(test.expNP)) + enabled := telemetryEnabledForNginxProxy(test.ep) + g.Expect(enabled).To(Equal(test.enabled)) }) } } -func TestIsNginxProxyReferenced(t *testing.T) { +func 
TestMetricsEnabledForNginxProxy(t *testing.T) { t.Parallel() + tests := []struct { - gc *GatewayClass - npName types.NamespacedName - name string - expRes bool + ep *EffectiveNginxProxy + port *int32 + name string + enabled bool }{ { - gc: &GatewayClass{ - Source: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", - }, - }, + name: "NginxProxy is nil", + port: nil, + enabled: true, + }, + { + name: "metrics struct is nil", + ep: &EffectiveNginxProxy{ + Metrics: nil, + }, + port: nil, + enabled: true, + }, + { + name: "metrics disable is nil", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: nil, }, }, - npName: types.NamespacedName{}, - expRes: false, - name: "nil nginxproxy", + port: nil, + enabled: true, }, { - gc: nil, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: false, - name: "nil gatewayclass", + name: "metrics is disabled", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(true), + }, + }, + port: nil, + enabled: false, }, { - gc: &GatewayClass{ - Source: nil, + name: "metrics is enabled with no port specified", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + }, }, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: false, - name: "nil gatewayclass source", + port: nil, + enabled: true, }, { - gc: &GatewayClass{ - Source: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", - }, + name: "metrics is enabled with port specified", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer[int32](8080), + }, + }, + port: helpers.GetPointer[int32](8080), + enabled: true, + }, + } + + for _, test := 
range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + port, enabled := MetricsEnabledForNginxProxy(test.ep) + g.Expect(port).To(Equal(test.port)) + g.Expect(enabled).To(Equal(test.enabled)) + }) + } +} + +func TestProcessNginxProxies(t *testing.T) { + t.Parallel() + + gatewayClassNpName := types.NamespacedName{Namespace: "gc-ns", Name: "gc-np"} + gatewayNpName := types.NamespacedName{Namespace: "gw-ns", Name: "gw-np"} + unreferencedNpName := types.NamespacedName{Namespace: "test", Name: "unref"} + + getTestNp := func(nsname types.NamespacedName) *ngfAPIv1alpha2.NginxProxy { + return &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nsname.Namespace, + Name: nsname.Name, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("service-name"), + }, + }, + } + } + + gateway := map[types.NamespacedName]*v1.Gateway{ + gatewayNpName: { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "gw-ns", + }, + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayNpName.Name, }, }, }, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: true, - name: "references the NginxProxy", + }, + } + + gatewayClass := &v1.GatewayClass{ + Spec: v1.GatewayClassSpec{ + ParametersRef: &v1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayClassNpName.Name, + Namespace: helpers.GetPointer[v1.Namespace]("gc-ns"), + }, + }, + } + + gatewayClassRefMissingNs := &v1.GatewayClass{ + Spec: v1.GatewayClassSpec{ + ParametersRef: &v1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayClassNpName.Name, + }, + }, + } + + getNpMap := func() map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy { + return map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + 
gatewayClassNpName: getTestNp(gatewayClassNpName), + gatewayNpName: getTestNp(gatewayNpName), + unreferencedNpName: getTestNp(unreferencedNpName), + } + } + + getExpResult := func(valid bool) map[types.NamespacedName]*NginxProxy { + var errMsgs field.ErrorList + if !valid { + errMsgs = field.ErrorList{ + field.Invalid(field.NewPath("spec.telemetry.serviceName"), "service-name", "error"), + } + } + + return map[types.NamespacedName]*NginxProxy{ + gatewayNpName: { + Valid: valid, + ErrMsgs: errMsgs, + Source: getTestNp(gatewayNpName), + }, + gatewayClassNpName: { + Valid: valid, + ErrMsgs: errMsgs, + Source: getTestNp(gatewayClassNpName), + }, + } + } + + tests := []struct { + validator validation.GenericValidator + nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy + gc *v1.GatewayClass + gws map[types.NamespacedName]*v1.Gateway + expResult map[types.NamespacedName]*NginxProxy + name string + }{ + { + name: "no nginx proxies", + nps: nil, + gc: gatewayClass, + gws: gateway, + validator: createValidValidator(), + expResult: map[types.NamespacedName]*NginxProxy{gatewayNpName: nil}, + }, + { + name: "gateway class param ref is missing namespace", + nps: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + gatewayClassNpName: getTestNp(gatewayClassNpName), + gatewayNpName: getTestNp(gatewayNpName), + }, + gc: gatewayClassRefMissingNs, + gws: gateway, + validator: createValidValidator(), + expResult: map[types.NamespacedName]*NginxProxy{ + gatewayNpName: { + Valid: true, + Source: getTestNp(gatewayNpName), + }, + }, + }, + { + name: "normal case; both nginx proxies are valid", + nps: getNpMap(), + gc: gatewayClass, + gws: gateway, + validator: createValidValidator(), + expResult: getExpResult(true), + }, + { + name: "normal case; both nginx proxies are invalid", + nps: getNpMap(), + gc: gatewayClass, + gws: gateway, + validator: createInvalidValidator(), + expResult: getExpResult(false), }, } @@ -153,7 +604,14 @@ func TestIsNginxProxyReferenced(t *testing.T) { 
t.Parallel() g := NewWithT(t) - g.Expect(isNginxProxyReferenced(test.npName, test.gc)).To(Equal(test.expRes)) + result := processNginxProxies( + test.nps, + test.validator, + test.gc, + test.gws, + ) + + g.Expect(helpers.Diff(test.expResult, result)).To(BeEmpty()) }) } } @@ -194,7 +652,7 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { gc: &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha2.GroupName, Kind: v1.Kind("WrongKind"), Name: "wrong-kind", }, @@ -207,7 +665,7 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { gc: &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha2.GroupName, Kind: v1.Kind(kinds.NginxProxy), Name: "nginx-proxy", }, @@ -228,30 +686,95 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { } } -func createValidValidator() *validationfakes.FakeGenericValidator { - v := &validationfakes.FakeGenericValidator{} - v.ValidateEscapedStringNoVarExpansionReturns(nil) - v.ValidateEndpointReturns(nil) - v.ValidateServiceNameReturns(nil) - v.ValidateNginxDurationReturns(nil) - - return v -} +func TestGWReferencesAnyNginxProxy(t *testing.T) { + t.Parallel() + tests := []struct { + gw *v1.Gateway + name string + expRes bool + }{ + { + gw: nil, + expRes: false, + name: "nil gateway", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{}, + }, + expRes: false, + name: "nil infrastructure", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{}, + }, + }, + expRes: false, + name: "nil parametersRef", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group("wrong-group"), + Kind: v1.Kind(kinds.NginxProxy), + Name: "wrong-group", + }, + }, + }, + }, + expRes: false, + name: "wrong group name", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ 
+ Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group(ngfAPIv1alpha2.GroupName), + Kind: v1.Kind("wrong-kind"), + Name: "wrong-kind", + }, + }, + }, + }, + expRes: false, + name: "wrong kind", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group(ngfAPIv1alpha2.GroupName), + Kind: v1.Kind(kinds.NginxProxy), + Name: "normal", + }, + }, + }, + }, + expRes: true, + name: "references an NginxProxy", + }, + } -func createInvalidValidator() *validationfakes.FakeGenericValidator { - v := &validationfakes.FakeGenericValidator{} - v.ValidateEscapedStringNoVarExpansionReturns(errors.New("error")) - v.ValidateEndpointReturns(errors.New("error")) - v.ValidateServiceNameReturns(errors.New("error")) - v.ValidateNginxDurationReturns(errors.New("error")) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) - return v + g.Expect(gwReferencesAnyNginxProxy(test.gw)).To(Equal(test.expRes)) + }) + } } func TestValidateNginxProxy(t *testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy validator *validationfakes.FakeGenericValidator name string expErrSubstring string @@ -260,36 +783,36 @@ func TestValidateNginxProxy(t *testing.T) { { name: "valid nginxproxy", validator: createValidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), - Exporter: &ngfAPI.TelemetryExporter{ - Interval: helpers.GetPointer[ngfAPI.Duration]("5ms"), - Endpoint: "my-endpoint", + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: helpers.GetPointer[ngfAPIv1alpha1.Duration]("5ms"), + Endpoint: helpers.GetPointer("my-endpoint"), }, - 
SpanAttributes: []ngfAPI.SpanAttribute{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "key", Value: "value"}, }, }, - IPFamily: helpers.GetPointer[ngfAPI.IPFamilyType](ngfAPI.Dual), - RewriteClientIP: &ngfAPI.RewriteClientIP{ + IPFamily: helpers.GetPointer[ngfAPIv1alpha2.IPFamilyType](ngfAPIv1alpha2.Dual), + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.1.1.1", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -298,9 +821,9 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid serviceName", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), // any value is invalid by the validator }, }, @@ -311,11 +834,11 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid endpoint", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Endpoint: "my-endpoint", // any value is invalid by the validator + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + 
Endpoint: helpers.GetPointer("my-endpoint"), // any value is invalid by the validator }, }, }, @@ -326,11 +849,11 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid interval", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Interval: helpers.GetPointer[ngfAPI.Duration]( + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: helpers.GetPointer[ngfAPIv1alpha1.Duration]( "my-interval", ), // any value is invalid by the validator }, @@ -343,10 +866,10 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid spanAttributes", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - SpanAttributes: []ngfAPI.SpanAttribute{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "my-key", Value: "my-value"}, // any value is invalid by the validator }, }, @@ -358,10 +881,10 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid ipFamily type", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{}, - IPFamily: helpers.GetPointer[ngfAPI.IPFamilyType]("invalid"), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{}, + IPFamily: helpers.GetPointer[ngfAPIv1alpha2.IPFamilyType]("invalid"), }, }, expErrSubstring: "spec.ipFamily", @@ -386,7 +909,7 @@ func TestValidateNginxProxy(t *testing.T) { func TestValidateRewriteClientIP(t *testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy validator *validationfakes.FakeGenericValidator name string errorString string @@ 
-395,33 +918,33 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "valid rewriteClientIP", validator: createValidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.56.32.11/32", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.1.1.1", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "2001:db8:a0b:12f0::1", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -430,21 +953,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid CIDR in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: 
"2001:db8::/129", }, { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/32", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -455,21 +978,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid IP address in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.2.3.4.5", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "10.0.0.1", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -480,21 +1003,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid hostname in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "bad-host$%^", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: 
ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -507,10 +1030,10 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is set and trustedAddresses is empty", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -520,32 +1043,32 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when trustedAddresses is greater in length than 16", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: 
"2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: 
"2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, }, }, }, @@ -556,17 +1079,17 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is not proxyProtocol or XForwardedFor", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeType("invalid")), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeType("invalid")), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: 
ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/32", }, }, @@ -580,10 +1103,10 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is not proxyProtocol or XForwardedFor and trustedAddresses is empty", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeType("invalid")), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeType("invalid")), }, }, }, @@ -595,17 +1118,17 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid address type in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPAddressType("invalid"), + Type: ngfAPIv1alpha2.RewriteClientIPAddressType("invalid"), Value: "2001:db8::/129", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -631,19 +1154,19 @@ func TestValidateRewriteClientIP(t *testing.T) { func TestValidateLogging(t *testing.T) { t.Parallel() - invalidLogLevel := ngfAPI.NginxErrorLogLevel("invalid-log-level") + invalidLogLevel := ngfAPIv1alpha2.NginxErrorLogLevel("invalid-log-level") tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy name string errorString string expectErrCount int }{ { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: 
&ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelDebug), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, }, }, @@ -652,10 +1175,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelInfo), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), }, }, }, @@ -664,10 +1187,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelNotice), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), }, }, }, @@ -676,10 +1199,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelWarn), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), }, }, }, @@ -688,10 +1211,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelError), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, }, @@ 
-700,10 +1223,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelCrit), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), }, }, }, @@ -712,10 +1235,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelAlert), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), }, }, }, @@ -724,10 +1247,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelEmerg), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), }, }, }, @@ -736,9 +1259,9 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ ErrorLevel: &invalidLogLevel, }, }, @@ -749,9 +1272,9 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{}, }, }, name: "empty log level", @@ -778,20 +1301,20 @@ func TestValidateNginxPlus(t 
*testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy name string errorString string expectErrCount int }{ { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "2001:db8:a0b:12f0::1"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "2001:db8:a0b:12f0::1"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/32"}, }, }, }, @@ -801,12 +1324,12 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/37"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/37"}, }, }, }, @@ -817,12 +1340,12 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: 
&ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3.5/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3.5/32"}, }, }, }, @@ -833,11 +1356,11 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowAddressType("Hostname"), Value: "example.com"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowAddressType("Hostname"), Value: "example.com"}, }, }, }, @@ -848,11 +1371,11 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowAddressType("invalid"), Value: "example.com"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowAddressType("invalid"), Value: "example.com"}, }, }, }, @@ -877,3 +1400,11 @@ func TestValidateNginxPlus(t *testing.T) { }) } } + +func TestValidateNginxProxy_NilCase(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + // Just testing the nil case for coverage reasons. The rest of the function is covered by other tests. 
+ g.Expect(buildNginxProxy(nil, &validationfakes.FakeGenericValidator{})).To(BeNil()) +} diff --git a/internal/mode/static/state/graph/policies.go b/internal/mode/static/state/graph/policies.go index 04fb6a0767..50dd3c3601 100644 --- a/internal/mode/static/state/graph/policies.go +++ b/internal/mode/static/state/graph/policies.go @@ -21,6 +21,9 @@ import ( type Policy struct { // Source is the corresponding Policy resource. Source policies.Policy + // InvalidForGateways is a map of Gateways for which this Policy is invalid for. Certain NginxProxy + // configurations may result in a policy not being valid for some Gateways, but not others. + InvalidForGateways map[types.NamespacedName]struct{} // Ancestors is a list of ancestor objects of the Policy. Used in status. Ancestors []PolicyAncestor // TargetRefs are the resources that the Policy targets. @@ -67,8 +70,8 @@ const ( ) // attachPolicies attaches the graph's processed policies to the resources they target. It modifies the graph in place. 
-func (g *Graph) attachPolicies(ctlrName string) { - if g.Gateway == nil { +func (g *Graph) attachPolicies(validator validation.PolicyValidator, ctlrName string) { + if len(g.Gateways) == 0 { return } @@ -76,21 +79,21 @@ func (g *Graph) attachPolicies(ctlrName string) { for _, ref := range policy.TargetRefs { switch ref.Kind { case kinds.Gateway: - attachPolicyToGateway(policy, ref, g.Gateway, g.IgnoredGateways, ctlrName) + attachPolicyToGateway(policy, ref, g.Gateways, ctlrName) case kinds.HTTPRoute, kinds.GRPCRoute: route, exists := g.Routes[routeKeyForKind(ref.Kind, ref.Nsname)] if !exists { continue } - attachPolicyToRoute(policy, route, ctlrName) + attachPolicyToRoute(policy, route, validator, ctlrName) case kinds.Service: svc, exists := g.ReferencedServices[ref.Nsname] if !exists { continue } - attachPolicyToService(policy, svc, g.Gateway, ctlrName) + attachPolicyToService(policy, svc, g.Gateways, ctlrName) } } } @@ -99,35 +102,51 @@ func (g *Graph) attachPolicies(ctlrName string) { func attachPolicyToService( policy *Policy, svc *ReferencedService, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, ctlrName string, ) { if ngfPolicyAncestorsFull(policy, ctlrName) { return } - ancestor := PolicyAncestor{ - Ancestor: createParentReference(v1.GroupName, kinds.Gateway, client.ObjectKeyFromObject(gw.Source)), - } + var validForAGateway bool + for gwNsName, gw := range gws { + if _, belongsToGw := svc.GatewayNsNames[gwNsName]; !belongsToGw { + continue + } - if !gw.Valid { - ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("Parent Gateway is invalid")} - if ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { - return + ancestor := PolicyAncestor{ + Ancestor: createParentReference(v1.GroupName, kinds.Gateway, client.ObjectKeyFromObject(gw.Source)), } - policy.Ancestors = append(policy.Ancestors, ancestor) - return - } + if !gw.Valid { + policy.InvalidForGateways[gwNsName] = struct{}{} + ancestor.Conditions = 
[]conditions.Condition{staticConds.NewPolicyTargetNotFound("Parent Gateway is invalid")} + if ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { + continue + } - if !ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { - policy.Ancestors = append(policy.Ancestors, ancestor) + policy.Ancestors = append(policy.Ancestors, ancestor) + continue + } + + if !ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { + policy.Ancestors = append(policy.Ancestors, ancestor) + } + validForAGateway = true } - svc.Policies = append(svc.Policies, policy) + if validForAGateway { + svc.Policies = append(svc.Policies, policy) + } } -func attachPolicyToRoute(policy *Policy, route *L7Route, ctlrName string) { +func attachPolicyToRoute(policy *Policy, route *L7Route, validator validation.PolicyValidator, ctlrName string) { + if ngfPolicyAncestorsFull(policy, ctlrName) { + // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 + return + } + kind := v1.Kind(kinds.HTTPRoute) if route.RouteType == RouteTypeGRPC { kind = kinds.GRPCRoute @@ -139,31 +158,43 @@ func attachPolicyToRoute(policy *Policy, route *L7Route, ctlrName string) { Ancestor: createParentReference(v1.GroupName, kind, routeNsName), } - if ngfPolicyAncestorsFull(policy, ctlrName) { - // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 - return - } - if !route.Valid || !route.Attachable || len(route.ParentRefs) == 0 { ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is invalid")} policy.Ancestors = append(policy.Ancestors, ancestor) return } + // as of now, ObservabilityPolicy is the only policy that needs this check, and it only attaches to Routes + for _, parentRef := range route.ParentRefs { + if parentRef.Gateway != nil && parentRef.Gateway.EffectiveNginxProxy != nil { + gw := parentRef.Gateway + globalSettings := &policies.GlobalSettings{ + TelemetryEnabled: 
telemetryEnabledForNginxProxy(gw.EffectiveNginxProxy), + } + + if conds := validator.ValidateGlobalSettings(policy.Source, globalSettings); len(conds) > 0 { + policy.InvalidForGateways[gw.NamespacedName] = struct{}{} + ancestor.Conditions = append(ancestor.Conditions, conds...) + } + } + } + policy.Ancestors = append(policy.Ancestors, ancestor) + if len(policy.InvalidForGateways) == len(route.ParentRefs) { + return + } + route.Policies = append(route.Policies, policy) } func attachPolicyToGateway( policy *Policy, ref PolicyTargetRef, - gw *Gateway, - ignoredGateways map[types.NamespacedName]*v1.Gateway, + gateways map[types.NamespacedName]*Gateway, ctlrName string, ) { - _, ignored := ignoredGateways[ref.Nsname] - - if !ignored && ref.Nsname != client.ObjectKeyFromObject(gw.Source) { + if ngfPolicyAncestorsFull(policy, ctlrName) { + // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 return } @@ -171,18 +202,17 @@ func attachPolicyToGateway( Ancestor: createParentReference(v1.GroupName, kinds.Gateway, ref.Nsname), } - if ngfPolicyAncestorsFull(policy, ctlrName) { - // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 - return - } + gw, exists := gateways[ref.Nsname] - if ignored { - ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is ignored")} + if !exists || (gw != nil && gw.Source == nil) { + policy.InvalidForGateways[ref.Nsname] = struct{}{} + ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is not found")} policy.Ancestors = append(policy.Ancestors, ancestor) return } if !gw.Valid { + policy.InvalidForGateways[ref.Nsname] = struct{}{} ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is invalid")} policy.Ancestors = append(policy.Ancestors, ancestor) return @@ -195,12 +225,11 @@ func attachPolicyToGateway( func processPolicies( pols map[PolicyKey]policies.Policy, validator 
validation.PolicyValidator, - gateways processedGateways, routes map[RouteKey]*L7Route, services map[types.NamespacedName]*ReferencedService, - globalSettings *policies.GlobalSettings, + gws map[types.NamespacedName]*Gateway, ) map[PolicyKey]*Policy { - if len(pols) == 0 || gateways.Winner == nil { + if len(pols) == 0 || len(gws) == 0 { return nil } @@ -217,7 +246,7 @@ func processPolicies( switch refGroupKind(ref.Group, ref.Kind) { case gatewayGroupKind: - if !gatewayExists(refNsName, gateways.Winner, gateways.Ignored) { + if !gatewayExists(refNsName, gws) { continue } case hrGroupKind, grpcGroupKind: @@ -249,14 +278,15 @@ func processPolicies( overlapConds := checkTargetRoutesForOverlap(targetedRoutes, routes) conds = append(conds, overlapConds...) - conds = append(conds, validator.Validate(policy, globalSettings)...) + conds = append(conds, validator.Validate(policy)...) processedPolicies[key] = &Policy{ - Source: policy, - Valid: len(conds) == 0, - Conditions: conds, - TargetRefs: targetRefs, - Ancestors: make([]PolicyAncestor, 0, len(targetRefs)), + Source: policy, + Valid: len(conds) == 0, + Conditions: conds, + TargetRefs: targetRefs, + Ancestors: make([]PolicyAncestor, 0, len(targetRefs)), + InvalidForGateways: make(map[types.NamespacedName]struct{}), } } diff --git a/internal/mode/static/state/graph/policies_test.go b/internal/mode/static/state/graph/policies_test.go index a7a8b71dc2..4adda19357 100644 --- a/internal/mode/static/state/graph/policies_test.go +++ b/internal/mode/static/state/graph/policies_test.go @@ -11,6 +11,7 @@ import ( v1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" @@ -75,8 +76,10 @@ func TestAttachPolicies(t *testing.T) { } 
expectNoGatewayPolicyAttachment := func(g *WithT, graph *Graph) { - if graph.Gateway != nil { - g.Expect(graph.Gateway.Policies).To(BeNil()) + for _, gw := range graph.Gateways { + if gw != nil { + g.Expect(gw.Policies).To(BeNil()) + } } } @@ -93,8 +96,10 @@ func TestAttachPolicies(t *testing.T) { } expectGatewayPolicyAttachment := func(g *WithT, graph *Graph) { - if graph.Gateway != nil { - g.Expect(graph.Gateway.Policies).To(HaveLen(1)) + for _, gw := range graph.Gateways { + if gw != nil { + g.Expect(gw.Policies).To(HaveLen(1)) + } } } @@ -144,26 +149,43 @@ func TestAttachPolicies(t *testing.T) { ) } - getGateway := func() *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: testNs, + getGateways := func() map[types.NamespacedName]*Gateway { + return map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gateway"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: testNs, + }, }, + Valid: true, + }, + {Namespace: testNs, Name: "gateway1"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway1", + Namespace: testNs, + }, + }, + Valid: true, }, - Valid: true, } } getServices := func() map[types.NamespacedName]*ReferencedService { return map[types.NamespacedName]*ReferencedService{ - {Namespace: testNs, Name: "svc-1"}: {}, + {Namespace: testNs, Name: "svc-1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: testNs, Name: "gateway"}: {}, + {Namespace: testNs, Name: "gateway1"}: {}, + }, + Policies: nil, + }, } } tests := []struct { - gateway *Gateway + gateway map[types.NamespacedName]*Gateway routes map[RouteKey]*L7Route svcs map[types.NamespacedName]*ReferencedService ngfPolicies map[PolicyKey]*Policy @@ -178,7 +200,7 @@ func TestAttachPolicies(t *testing.T) { }, { name: "nil Routes; gateway and service policies attach", - gateway: getGateway(), + gateway: getGateways(), svcs: getServices(), ngfPolicies: 
getPolicies(), expects: []func(g *WithT, graph *Graph){ @@ -191,7 +213,7 @@ func TestAttachPolicies(t *testing.T) { name: "nil ReferencedServices; gateway and route policies attach", routes: getRoutes(), ngfPolicies: getPolicies(), - gateway: getGateway(), + gateway: getGateways(), expects: []func(g *WithT, graph *Graph){ expectGatewayPolicyAttachment, expectRoutePolicyAttachment, @@ -203,7 +225,7 @@ func TestAttachPolicies(t *testing.T) { routes: getRoutes(), svcs: getServices(), ngfPolicies: getPolicies(), - gateway: getGateway(), + gateway: getGateways(), expects: expectAllAttachmentList, }, } @@ -214,13 +236,13 @@ func TestAttachPolicies(t *testing.T) { g := NewWithT(t) graph := &Graph{ - Gateway: test.gateway, + Gateways: test.gateway, Routes: test.routes, ReferencedServices: test.svcs, NGFPolicies: test.ngfPolicies, } - graph.attachPolicies("nginx-gateway") + graph.attachPolicies(nil, "nginx-gateway") for _, expect := range test.expects { expect(g, graph) } @@ -275,34 +297,49 @@ func TestAttachPolicyToRoute(t *testing.T) { } } + validatorError := &policiesfakes.FakeValidator{ + ValidateGlobalSettingsStub: func(_ policies.Policy, gs *policies.GlobalSettings) []conditions.Condition { + if !gs.TelemetryEnabled { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + } + } + return nil + }, + } + tests := []struct { route *L7Route policy *Policy + validator policies.Validator name string expAncestors []PolicyAncestor expAttached bool }{ { - name: "policy attaches to http route", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "policy attaches to http route", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ {Ancestor: 
createExpAncestor(kinds.HTTPRoute)}, }, expAttached: true, }, { - name: "policy attaches to grpc route", - route: createGRPCRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "policy attaches to grpc route", + route: createGRPCRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ {Ancestor: createExpAncestor(kinds.GRPCRoute)}, }, expAttached: true, }, { - name: "attachment with existing ancestor", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + name: "attachment with existing ancestor", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, policy: &Policy{ Source: &policiesfakes.FakePolicy{}, Ancestors: []PolicyAncestor{ @@ -316,9 +353,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: true, }, { - name: "no attachment; unattachable route", - route: createHTTPRoute(true /*valid*/, false /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; unattachable route", + route: createHTTPRoute(true /*valid*/, false /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -328,9 +366,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: false, }, { - name: "no attachment; missing parentRefs", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, false /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; missing parentRefs", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, false /*parentRefs*/), + validator: 
&policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -340,9 +379,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: false, }, { - name: "no attachment; invalid route", - route: createHTTPRoute(false /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; invalid route", + route: createHTTPRoute(false /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -354,10 +394,104 @@ func TestAttachPolicyToRoute(t *testing.T) { { name: "no attachment; max ancestors", route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, policy: &Policy{Source: createTestPolicyWithAncestors(16)}, expAncestors: nil, expAttached: false, }, + { + name: "invalid for some ParentRefs", + route: &L7Route{ + Source: &v1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: routeNsName.Name, + Namespace: routeNsName.Namespace, + }, + }, + Valid: true, + Attachable: true, + RouteType: RouteTypeHTTP, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway1", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("test-endpoint"), + }, + }, + }, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway2", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{}, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + }, + }, + 
validator: validatorError, + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + expAncestors: []PolicyAncestor{ + { + Ancestor: createExpAncestor(kinds.HTTPRoute), + Conditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + }, + expAttached: true, + }, + { + name: "invalid for all ParentRefs", + route: &L7Route{ + Source: &v1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: routeNsName.Name, + Namespace: routeNsName.Namespace, + }, + }, + Valid: true, + Attachable: true, + RouteType: RouteTypeHTTP, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway1", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{}, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + }, + }, + validator: validatorError, + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + expAncestors: []PolicyAncestor{ + { + Ancestor: createExpAncestor(kinds.HTTPRoute), + Conditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + }, + expAttached: false, + }, } for _, test := range tests { @@ -365,7 +499,7 @@ func TestAttachPolicyToRoute(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToRoute(test.policy, test.route, "nginx-gateway") + attachPolicyToRoute(test.policy, test.route, test.validator, "nginx-gateway") if test.expAttached { g.Expect(test.route.Policies).To(HaveLen(1)) @@ -382,23 +516,26 @@ func TestAttachPolicyToGateway(t *testing.T) { t.Parallel() gatewayNsName := types.NamespacedName{Namespace: testNs, Name: "gateway"} gateway2NsName := types.NamespacedName{Namespace: testNs, Name: "gateway2"} - ignoredGatewayNsName := types.NamespacedName{Namespace: testNs, Name: 
"ignored"} - newGateway := func(valid bool, nsname types.NamespacedName) *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: nsname.Namespace, - Name: nsname.Name, + newGatewayMap := func(valid bool, nsname []types.NamespacedName) map[types.NamespacedName]*Gateway { + gws := make(map[types.NamespacedName]*Gateway) + for _, name := range nsname { + gws[name] = &Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.Name, + Namespace: name.Namespace, + }, }, - }, - Valid: valid, + Valid: valid, + } } + return gws } tests := []struct { policy *Policy - gw *Gateway + gws map[types.NamespacedName]*Gateway name string expAncestors []PolicyAncestor expAttached bool @@ -413,8 +550,9 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, }, @@ -430,11 +568,12 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, Ancestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, }, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, {Ancestor: getGatewayParentRef(gatewayNsName)}, @@ -442,21 +581,22 @@ func TestAttachPolicyToGateway(t *testing.T) { expAttached: true, }, { - name: "not attached; gateway ignored", + name: "not attached; gateway is not found", policy: &Policy{ Source: &policiesfakes.FakePolicy{}, TargetRefs: []PolicyTargetRef{ { - Nsname: ignoredGatewayNsName, + Nsname: gateway2NsName, Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + 
gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ { - Ancestor: getGatewayParentRef(ignoredGatewayNsName), - Conditions: []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is ignored")}, + Ancestor: getGatewayParentRef(gateway2NsName), + Conditions: []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is not found")}, }, }, expAttached: false, @@ -471,8 +611,9 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(false, gatewayNsName), + gws: newGatewayMap(false, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ { Ancestor: getGatewayParentRef(gatewayNsName), @@ -481,21 +622,6 @@ func TestAttachPolicyToGateway(t *testing.T) { }, expAttached: false, }, - { - name: "not attached; non-NGF gateway", - policy: &Policy{ - Source: &policiesfakes.FakePolicy{}, - TargetRefs: []PolicyTargetRef{ - { - Nsname: gateway2NsName, - Kind: "Gateway", - }, - }, - }, - gw: newGateway(true, gatewayNsName), - expAncestors: nil, - expAttached: false, - }, { name: "not attached; max ancestors", policy: &Policy{ @@ -506,28 +632,29 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: nil, expAttached: false, }, } for _, test := range tests { - ignoredGateways := map[types.NamespacedName]*v1.Gateway{ - ignoredGatewayNsName: nil, - } - t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToGateway(test.policy, test.policy.TargetRefs[0], test.gw, ignoredGateways, "nginx-gateway") + attachPolicyToGateway(test.policy, test.policy.TargetRefs[0], test.gws, "nginx-gateway") if test.expAttached { - g.Expect(test.gw.Policies).To(HaveLen(1)) + for _, gw := range test.gws { + 
g.Expect(gw.Policies).To(HaveLen(1)) + } } else { - g.Expect(test.gw.Policies).To(BeEmpty()) + for _, gw := range test.gws { + g.Expect(gw.Policies).To(BeEmpty()) + } } g.Expect(test.policy.Ancestors).To(BeEquivalentTo(test.expAncestors)) @@ -541,31 +668,37 @@ func TestAttachPolicyToService(t *testing.T) { gwNsname := types.NamespacedName{Namespace: testNs, Name: "gateway"} gw2Nsname := types.NamespacedName{Namespace: testNs, Name: "gateway2"} - getGateway := func(valid bool) *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: gwNsname.Name, - Namespace: gwNsname.Namespace, + getGateway := func(valid bool) map[types.NamespacedName]*Gateway { + return map[types.NamespacedName]*Gateway{ + gwNsname: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsname.Name, + Namespace: gwNsname.Namespace, + }, }, + Valid: valid, }, - Valid: valid, } } tests := []struct { policy *Policy svc *ReferencedService - gw *Gateway + gws map[types.NamespacedName]*Gateway name string expAncestors []PolicyAncestor expAttached bool }{ { - name: "attachment", - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + name: "attachment", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -582,9 +715,14 @@ func TestAttachPolicyToService(t *testing.T) { Ancestor: getGatewayParentRef(gwNsname), }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -601,9 
+739,15 @@ func TestAttachPolicyToService(t *testing.T) { Ancestor: getGatewayParentRef(gw2Nsname), }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gw2Nsname: {}, + gwNsname: {}, + }, }, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -615,10 +759,14 @@ func TestAttachPolicyToService(t *testing.T) { }, }, { - name: "no attachment; gateway is invalid", - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, - svc: &ReferencedService{}, - gw: getGateway(false /*invalid*/), + name: "no attachment; gateway is invalid", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(false /*invalid*/), expAttached: false, expAncestors: []PolicyAncestor{ { @@ -628,13 +776,55 @@ func TestAttachPolicyToService(t *testing.T) { }, }, { - name: "no attachment; max ancestor", - policy: &Policy{Source: createTestPolicyWithAncestors(16)}, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + name: "no attachment; max ancestor", + policy: &Policy{Source: createTestPolicyWithAncestors(16), InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), + expAttached: false, + expAncestors: nil, + }, + { + name: "no attachment; does not belong to gateway", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gw2Nsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: false, expAncestors: nil, }, + { + name: "no 
attachment; gateway is invalid", + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + Ancestors: []PolicyAncestor{ + { + Ancestor: getGatewayParentRef(gwNsname), + }, + }, + }, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(false), + expAttached: false, + expAncestors: []PolicyAncestor{ + { + Ancestor: getGatewayParentRef(gwNsname), + }, + }, + }, } for _, test := range tests { @@ -642,7 +832,7 @@ func TestAttachPolicyToService(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToService(test.policy, test.svc, test.gw, "ctlr") + attachPolicyToService(test.policy, test.svc, test.gws, "ctlr") if test.expAttached { g.Expect(test.svc.Policies).To(HaveLen(1)) } else { @@ -663,7 +853,7 @@ func TestProcessPolicies(t *testing.T) { hrRef := createTestRef(kinds.HTTPRoute, v1.GroupName, "hr") grpcRef := createTestRef(kinds.GRPCRoute, v1.GroupName, "grpc") gatewayRef := createTestRef(kinds.Gateway, v1.GroupName, "gw") - ignoredGatewayRef := createTestRef(kinds.Gateway, v1.GroupName, "ignored") + gatewayRef2 := createTestRef(kinds.Gateway, v1.GroupName, "gw2") svcRef := createTestRef(kinds.Service, "core", "svc") // These refs reference objects that do not belong to NGF. 
@@ -677,7 +867,7 @@ func TestProcessPolicies(t *testing.T) { pol1, pol1Key := createTestPolicyAndKey(policyGVK, "pol1", hrRef) pol2, pol2Key := createTestPolicyAndKey(policyGVK, "pol2", grpcRef) pol3, pol3Key := createTestPolicyAndKey(policyGVK, "pol3", gatewayRef) - pol4, pol4Key := createTestPolicyAndKey(policyGVK, "pol4", ignoredGatewayRef) + pol4, pol4Key := createTestPolicyAndKey(policyGVK, "pol4", gatewayRef2) pol5, pol5Key := createTestPolicyAndKey(policyGVK, "pol5", hrDoesNotExistRef) pol6, pol6Key := createTestPolicyAndKey(policyGVK, "pol6", hrWrongGroup) pol7, pol7Key := createTestPolicyAndKey(policyGVK, "pol7", gatewayWrongGroupRef) @@ -724,8 +914,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol2Key: { Source: pol2, @@ -736,8 +927,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol3Key: { Source: pol3, @@ -748,20 +940,22 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol4Key: { Source: pol4, TargetRefs: []PolicyTargetRef{ { - Nsname: types.NamespacedName{Namespace: testNs, Name: "ignored"}, + Nsname: types.NamespacedName{Namespace: testNs, Name: "gw2"}, Kind: kinds.Gateway, Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol10Key: { Source: pol10, @@ -772,18 +966,16 @@ func TestProcessPolicies(t *testing.T) { Group: "core", }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + 
Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, }, }, { name: "invalid and valid policies", validator: &policiesfakes.FakeValidator{ - ValidateStub: func( - policy policies.Policy, - _ *policies.GlobalSettings, - ) []conditions.Condition { + ValidateStub: func(policy policies.Policy) []conditions.Condition { if policy.GetName() == "pol1" { return []conditions.Condition{staticConds.NewPolicyInvalid("invalid error")} } @@ -808,8 +1000,9 @@ func TestProcessPolicies(t *testing.T) { Conditions: []conditions.Condition{ staticConds.NewPolicyInvalid("invalid error"), }, - Ancestors: []PolicyAncestor{}, - Valid: false, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: false, }, pol2Key: { Source: pol2, @@ -820,8 +1013,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, }, }, @@ -846,8 +1040,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol1ConflictKey: { Source: pol1Conflict, @@ -861,27 +1056,32 @@ func TestProcessPolicies(t *testing.T) { Conditions: []conditions.Condition{ staticConds.NewPolicyConflicted("Conflicts with another MyPolicy"), }, - Ancestors: []PolicyAncestor{}, - Valid: false, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: false, }, }, }, } - gateways := processedGateways{ - Winner: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: testNs, + gateways := map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gw"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: testNs, + }, 
}, + Valid: true, }, - Ignored: map[types.NamespacedName]*v1.Gateway{ - {Namespace: testNs, Name: "ignored"}: { + {Namespace: testNs, Name: "gw2"}: { + Source: &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw", + Name: "gw2", Namespace: testNs, }, }, + Valid: true, }, } @@ -913,7 +1113,7 @@ func TestProcessPolicies(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processPolicies(test.policies, test.validator, gateways, routes, services, nil) + processed := processPolicies(test.policies, test.validator, routes, services, gateways) g.Expect(processed).To(BeEquivalentTo(test.expProcessedPolicies)) }) } @@ -1039,12 +1239,15 @@ func TestProcessPolicies_RouteOverlap(t *testing.T) { }, } - gateways := processedGateways{ - Winner: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: testNs, + gateways := map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gw"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: testNs, + }, }, + Valid: true, }, } @@ -1053,7 +1256,7 @@ func TestProcessPolicies_RouteOverlap(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processPolicies(test.policies, test.validator, gateways, test.routes, nil, nil) + processed := processPolicies(test.policies, test.validator, test.routes, nil, gateways) g.Expect(processed).To(HaveLen(1)) for _, pol := range processed { diff --git a/internal/mode/static/state/graph/route_common.go b/internal/mode/static/state/graph/route_common.go index f146a55adb..e97c552f52 100644 --- a/internal/mode/static/state/graph/route_common.go +++ b/internal/mode/static/state/graph/route_common.go @@ -31,8 +31,8 @@ type ParentRef struct { SectionName *v1.SectionName // Port is the network port this Route targets. Port *v1.PortNumber - // Gateway is the NamespacedName of the referenced Gateway - Gateway types.NamespacedName + // Gateway is the metadata about the parent Gateway. 
+ Gateway *ParentRefGateway // Idx is the index of the corresponding ParentReference in the Route. Idx int } @@ -40,17 +40,32 @@ type ParentRef struct { // ParentRefAttachmentStatus describes the attachment status of a ParentRef. type ParentRefAttachmentStatus struct { // AcceptedHostnames is an intersection between the hostnames supported by an attached Listener - // and the hostnames from this Route. Key is listener name, value is list of hostnames. + // and the hostnames from this Route. Key is , value is list of hostnames. AcceptedHostnames map[string][]string - // FailedCondition is the condition that describes why the ParentRef is not attached to the Gateway. It is set - // when Attached is false. - FailedCondition conditions.Condition + // FailedConditions are the conditions that describe why the ParentRef is not attached to the Gateway, or other + // failures that may lead to partial attachments. For example, a backendRef could be invalid, but the route can + // still attach. The backendRef condition would be displayed here. + FailedConditions []conditions.Condition // ListenerPort is the port on the Listener that the Route is attached to. ListenerPort v1.PortNumber // Attached indicates if the ParentRef is attached to the Gateway. Attached bool } +// ParentRefGateway contains the NamespacedName and EffectiveNginxProxy of the parent Gateway. +type ParentRefGateway struct { + EffectiveNginxProxy *EffectiveNginxProxy + NamespacedName types.NamespacedName +} + +// CreateParentRefGateway creates a new ParentRefGateway object using a graph.Gateway object. 
+func CreateParentRefGateway(gateway *Gateway) *ParentRefGateway { + return &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gateway.Source), + EffectiveNginxProxy: gateway.EffectiveNginxProxy, + } +} + type RouteType string const ( @@ -174,6 +189,11 @@ func CreateRouteKeyL4(obj client.Object) L4RouteKey { } } +// CreateGatewayListenerKey creates a key using the Gateway NamespacedName and Listener name. +func CreateGatewayListenerKey(gwNSName types.NamespacedName, listenerName string) string { + return fmt.Sprintf("%s/%s/%s", gwNSName.Namespace, gwNSName.Name, listenerName) +} + type routeRuleErrors struct { invalid field.ErrorList resolve field.ErrorList @@ -188,12 +208,11 @@ func (e routeRuleErrors) append(newErrors routeRuleErrors) routeRuleErrors { func buildL4RoutesForGateways( tlsRoutes map[types.NamespacedName]*v1alpha.TLSRoute, - gatewayNsNames []types.NamespacedName, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, + gws map[types.NamespacedName]*Gateway, resolver *referenceGrantResolver, ) map[L4RouteKey]*L4Route { - if len(gatewayNsNames) == 0 { + if len(gws) == 0 { return nil } @@ -201,15 +220,15 @@ func buildL4RoutesForGateways( for _, route := range tlsRoutes { r := buildTLSRoute( route, - gatewayNsNames, + gws, services, - npCfg, resolver.refAllowedFrom(fromTLSRoute(route.Namespace)), ) if r != nil { routes[CreateRouteKeyL4(route)] = r } } + return routes } @@ -218,20 +237,17 @@ func buildRoutesForGateways( validator validation.HTTPFieldsValidator, httpRoutes map[types.NamespacedName]*v1.HTTPRoute, grpcRoutes map[types.NamespacedName]*v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, - npCfg *NginxProxy, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) map[RouteKey]*L7Route { - if len(gatewayNsNames) == 0 { + if len(gateways) == 0 { return nil } routes := make(map[RouteKey]*L7Route) - http2disabled := isHTTP2Disabled(npCfg) - for _, route := range 
httpRoutes { - r := buildHTTPRoute(validator, route, gatewayNsNames, snippetsFilters) + r := buildHTTPRoute(validator, route, gateways, snippetsFilters) if r == nil { continue } @@ -239,11 +255,11 @@ func buildRoutesForGateways( routes[CreateRouteKey(route)] = r // if this route has a RequestMirror filter, build a duplicate route for the mirror - buildHTTPMirrorRoutes(routes, r, route, gatewayNsNames, snippetsFilters) + buildHTTPMirrorRoutes(routes, r, route, gateways, snippetsFilters) } for _, route := range grpcRoutes { - r := buildGRPCRoute(validator, route, gatewayNsNames, http2disabled, snippetsFilters) + r := buildGRPCRoute(validator, route, gateways, snippetsFilters) if r == nil { continue } @@ -251,23 +267,16 @@ func buildRoutesForGateways( routes[CreateRouteKey(route)] = r // if this route has a RequestMirror filter, build a duplicate route for the mirror - buildGRPCMirrorRoutes(routes, r, route, gatewayNsNames, snippetsFilters, http2disabled) + buildGRPCMirrorRoutes(routes, r, route, gateways, snippetsFilters) } return routes } -func isHTTP2Disabled(npCfg *NginxProxy) bool { - if npCfg == nil { - return false - } - return npCfg.Source.Spec.DisableHTTP2 -} - func buildSectionNameRefs( parentRefs []v1.ParentReference, routeNamespace string, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, ) ([]ParentRef, error) { sectionNameRefs := make([]ParentRef, 0, len(parentRefs)) @@ -278,8 +287,8 @@ func buildSectionNameRefs( uniqueSectionsPerGateway := make(map[key]struct{}) for i, p := range parentRefs { - gw, found := findGatewayForParentRef(p, routeNamespace, gatewayNsNames) - if !found { + gw := findGatewayForParentRef(p, routeNamespace, gws) + if gw == nil { continue } @@ -288,19 +297,20 @@ func buildSectionNameRefs( sectionName = string(*p.SectionName) } + gwNsName := client.ObjectKeyFromObject(gw.Source) k := key{ - gwNsName: gw, + gwNsName: gwNsName, sectionName: sectionName, } if _, exist := uniqueSectionsPerGateway[k]; exist 
{ - return nil, fmt.Errorf("duplicate section name %q for Gateway %s", sectionName, gw.String()) + return nil, fmt.Errorf("duplicate section name %q for Gateway %s", sectionName, gwNsName.String()) } uniqueSectionsPerGateway[k] = struct{}{} sectionNameRefs = append(sectionNameRefs, ParentRef{ Idx: i, - Gateway: gw, + Gateway: CreateParentRefGateway(gw), SectionName: p.SectionName, Port: p.Port, }) @@ -312,85 +322,98 @@ func buildSectionNameRefs( func findGatewayForParentRef( ref v1.ParentReference, routeNamespace string, - gatewayNsNames []types.NamespacedName, -) (gwNsName types.NamespacedName, found bool) { + gws map[types.NamespacedName]*Gateway, +) *Gateway { if ref.Kind != nil && *ref.Kind != kinds.Gateway { - return types.NamespacedName{}, false + return nil } if ref.Group != nil && *ref.Group != v1.GroupName { - return types.NamespacedName{}, false + return nil } - // if the namespace is missing, assume the namespace of the HTTPRoute + // if the namespace is missing, assume the namespace of the Route ns := routeNamespace if ref.Namespace != nil { ns = string(*ref.Namespace) } - for _, gw := range gatewayNsNames { - if gw.Namespace == ns && gw.Name == string(ref.Name) { - return gw, true - } + key := types.NamespacedName{ + Namespace: ns, + Name: string(ref.Name), + } + + if gw, exists := gws[key]; exists { + return gw } - return types.NamespacedName{}, false + return nil } func bindRoutesToListeners( l7Routes map[RouteKey]*L7Route, l4Routes map[L4RouteKey]*L4Route, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, namespaces map[types.NamespacedName]*apiv1.Namespace, ) { - if gw == nil { + if len(gws) == 0 { return } - for _, r := range l7Routes { - bindL7RouteToListeners(r, gw, namespaces) - } + for _, gw := range gws { + for _, r := range l7Routes { + bindL7RouteToListeners(r, gw, namespaces) + } - routes := make([]*L7Route, 0, len(l7Routes)) - for _, r := range l7Routes { - routes = append(routes, r) - } + routes := make([]*L7Route, 0, 
len(l7Routes)) + for _, r := range l7Routes { + routes = append(routes, r) + } - listenerMap := getListenerHostPortMap(gw.Listeners) - isolateL7RouteListeners(routes, listenerMap) + listenerMap := getListenerHostPortMap(gw.Listeners, gw) + isolateL7RouteListeners(routes, listenerMap) - l4RouteSlice := make([]*L4Route, 0, len(l4Routes)) - for _, r := range l4Routes { - l4RouteSlice = append(l4RouteSlice, r) - } + l4RouteSlice := make([]*L4Route, 0, len(l4Routes)) + for _, r := range l4Routes { + l4RouteSlice = append(l4RouteSlice, r) + } - // Sort the slice by timestamp and name so that we process the routes in the priority order - sort.Slice(l4RouteSlice, func(i, j int) bool { - return ngfSort.LessClientObject(l4RouteSlice[i].Source, l4RouteSlice[j].Source) - }) + // Sort the slice by timestamp and name so that we process the routes in the priority order + sort.Slice(l4RouteSlice, func(i, j int) bool { + return ngfSort.LessClientObject(l4RouteSlice[i].Source, l4RouteSlice[j].Source) + }) - // portHostnamesMap exists to detect duplicate hostnames on the same port - portHostnamesMap := make(map[string]struct{}) + // portHostnamesMap exists to detect duplicate hostnames on the same port + portHostnamesMap := make(map[string]struct{}) - for _, r := range l4RouteSlice { - bindL4RouteToListeners(r, gw, namespaces, portHostnamesMap) - } + for _, r := range l4RouteSlice { + bindL4RouteToListeners(r, gw, namespaces, portHostnamesMap) + } - isolateL4RouteListeners(l4RouteSlice, listenerMap) + isolateL4RouteListeners(l4RouteSlice, listenerMap) + } } type hostPort struct { + gwNsName types.NamespacedName hostname string port v1.PortNumber } -func getListenerHostPortMap(listeners []*Listener) map[string]hostPort { +func getListenerHostPortMap(listeners []*Listener, gw *Gateway) map[string]hostPort { listenerHostPortMap := make(map[string]hostPort, len(listeners)) + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } for _, l := range 
listeners { - listenerHostPortMap[l.Name] = hostPort{ + key := CreateGatewayListenerKey(client.ObjectKeyFromObject(gw.Source), l.Name) + listenerHostPortMap[key] = hostPort{ hostname: getHostname(l.Source.Hostname), port: l.Source.Port, + gwNsName: gwNsName, } } + return listenerHostPortMap } @@ -420,22 +443,31 @@ func isolateHostnamesForParentRefs(parentRef []ParentRef, listenerHostnameMap ma continue } + if ref.Attachment == nil { + continue + } + acceptedHostnames := ref.Attachment.AcceptedHostnames hostnamesToRemoves := make(map[string]struct{}) - for listenerName, hostnames := range acceptedHostnames { + for key, hostnames := range acceptedHostnames { if len(hostnames) == 0 { continue } for _, h := range hostnames { for lName, lHostPort := range listenerHostnameMap { + // skip comparison if not part of the same gateway + if lHostPort.gwNsName != ref.Gateway.NamespacedName { + continue + } + // skip comparison if it is a catch all listener block if lHostPort.hostname == "" { continue } - // for L7Routes, we compare the hostname, port and listener name combination + // for L7Routes, we compare the hostname, port and listenerName combination // to identify if hostname needs to be isolated. - if h == lHostPort.hostname && listenerName != lName { + if h == lHostPort.hostname && key != lName { // for L4Routes, we only compare the hostname and listener name combination // because we do not allow l4Routes to attach to the same listener // if they share the same port and hostname. @@ -447,7 +479,7 @@ func isolateHostnamesForParentRefs(parentRef []ParentRef, listenerHostnameMap ma } isolatedHostnames := removeHostnames(hostnames, hostnamesToRemoves) - ref.Attachment.AcceptedHostnames[listenerName] = isolatedHostnames + ref.Attachment.AcceptedHostnames[key] = isolatedHostnames } } } @@ -483,7 +515,7 @@ func validateParentRef( // Case 1: Attachment is not possible because the specified SectionName does not match any Listeners in the // Gateway. 
if !listenerExists { - attachment.FailedCondition = staticConds.NewRouteNoMatchingParent() + attachment.FailedConditions = append(attachment.FailedConditions, staticConds.NewRouteNoMatchingParent()) return attachment, nil } @@ -491,25 +523,19 @@ func validateParentRef( if ref.Port != nil { valErr := field.Forbidden(path.Child("port"), "cannot be set") - attachment.FailedCondition = staticConds.NewRouteUnsupportedValue(valErr.Error()) - return attachment, attachableListeners - } - - // Case 3: the parentRef references an ignored Gateway resource. - - referencesWinningGw := ref.Gateway.Namespace == gw.Source.Namespace && ref.Gateway.Name == gw.Source.Name - - if !referencesWinningGw { - attachment.FailedCondition = staticConds.NewRouteNotAcceptedGatewayIgnored() + attachment.FailedConditions = append( + attachment.FailedConditions, staticConds.NewRouteUnsupportedValue(valErr.Error()), + ) return attachment, attachableListeners } - // Case 4: Attachment is not possible because Gateway is invalid + // Case 3: Attachment is not possible because Gateway is invalid if !gw.Valid { - attachment.FailedCondition = staticConds.NewRouteInvalidGateway() + attachment.FailedConditions = append(attachment.FailedConditions, staticConds.NewRouteInvalidGateway()) return attachment, attachableListeners } + return attachment, attachableListeners } @@ -526,13 +552,25 @@ func bindL4RouteToListeners( for i := range route.ParentRefs { ref := &(route.ParentRefs)[i] + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } + + if ref.Gateway.NamespacedName != gwNsName { + continue + } + attachment, attachableListeners := validateParentRef(ref, gw) - if attachment.FailedCondition != (conditions.Condition{}) { + if len(attachment.FailedConditions) > 0 { continue } - // Winning Gateway + if cond, ok := route.Spec.BackendRef.InvalidForGateways[gwNsName]; ok { + attachment.FailedConditions = append(attachment.FailedConditions, cond) + } + // Try to attach 
Route to all matching listeners cond, attached := tryToAttachL4RouteToListeners( @@ -544,7 +582,7 @@ func bindL4RouteToListeners( portHostnamesMap, ) if !attached { - attachment.FailedCondition = cond + attachment.FailedConditions = append(attachment.FailedConditions, cond) continue } if cond != (conditions.Condition{}) { @@ -661,7 +699,7 @@ func bindToListenerL4( return true, false, true } - refStatus.AcceptedHostnames[string(l.Source.Name)] = hostnames + refStatus.AcceptedHostnames[CreateGatewayListenerKey(l.GatewayName, l.Name)] = hostnames l.L4Routes[CreateRouteKeyL4(route.Source)] = route return true, true, true @@ -679,13 +717,36 @@ func bindL7RouteToListeners( for i := range route.ParentRefs { ref := &(route.ParentRefs)[i] + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } + + if ref.Gateway.NamespacedName != gwNsName { + continue + } + attachment, attachableListeners := validateParentRef(ref, gw) - if attachment.FailedCondition != (conditions.Condition{}) { + if route.RouteType == RouteTypeGRPC && isHTTP2Disabled(gw.EffectiveNginxProxy) { + msg := "HTTP2 is disabled - cannot configure GRPCRoutes" + attachment.FailedConditions = append( + attachment.FailedConditions, staticConds.NewRouteUnsupportedConfiguration(msg), + ) + } + + if len(attachment.FailedConditions) > 0 { continue } - // Winning Gateway + for _, rule := range route.Spec.Rules { + for _, backendRef := range rule.BackendRefs { + if cond, ok := backendRef.InvalidForGateways[gwNsName]; ok { + attachment.FailedConditions = append(attachment.FailedConditions, cond) + } + } + } + // Try to attach Route to all matching listeners cond, attached := tryToAttachL7RouteToListeners( @@ -696,7 +757,7 @@ func bindL7RouteToListeners( namespaces, ) if !attached { - attachment.FailedCondition = cond + attachment.FailedConditions = append(attachment.FailedConditions, cond) continue } if cond != (conditions.Condition{}) { @@ -707,6 +768,18 @@ func 
bindL7RouteToListeners( } } +func isHTTP2Disabled(npCfg *EffectiveNginxProxy) bool { + if npCfg == nil { + return false + } + + if npCfg.DisableHTTP2 == nil { + return false + } + + return *npCfg.DisableHTTP2 +} + // tryToAttachRouteToListeners tries to attach the route to the listeners that match the parentRef and the hostnames. // There are two cases: // (1) If it succeeds in attaching at least one listener it will return true. The returned condition will be empty if @@ -739,7 +812,7 @@ func tryToAttachL7RouteToListeners( return true, false } - refStatus.AcceptedHostnames[string(l.Source.Name)] = hostnames + refStatus.AcceptedHostnames[CreateGatewayListenerKey(l.GatewayName, l.Name)] = hostnames refStatus.ListenerPort = l.Source.Port l.Routes[rk] = route diff --git a/internal/mode/static/state/graph/route_common_test.go b/internal/mode/static/state/graph/route_common_test.go index 8268af036e..dc127bb3b8 100644 --- a/internal/mode/static/state/graph/route_common_test.go +++ b/internal/mode/static/state/graph/route_common_test.go @@ -54,27 +54,44 @@ func TestBuildSectionNameRefs(t *testing.T) { }, } - gwNsNames := []types.NamespacedName{gwNsName1, gwNsName2} + gws := map[types.NamespacedName]*Gateway{ + gwNsName1: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName1.Name, + Namespace: gwNsName1.Namespace, + }, + }, + }, + gwNsName2: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName2.Name, + Namespace: gwNsName2.Namespace, + }, + }, + }, + } expected := []ParentRef{ { Idx: 0, - Gateway: gwNsName1, + Gateway: CreateParentRefGateway(gws[gwNsName1]), SectionName: parentRefs[0].SectionName, }, { Idx: 2, - Gateway: gwNsName2, + Gateway: CreateParentRefGateway(gws[gwNsName2]), SectionName: parentRefs[2].SectionName, }, { Idx: 3, - Gateway: gwNsName1, + Gateway: CreateParentRefGateway(gws[gwNsName1]), SectionName: parentRefs[3].SectionName, }, { Idx: 4, - Gateway: gwNsName2, + Gateway: 
CreateParentRefGateway(gws[gwNsName2]), SectionName: parentRefs[4].SectionName, }, } @@ -126,7 +143,7 @@ func TestBuildSectionNameRefs(t *testing.T) { t.Parallel() g := NewWithT(t) - result, err := buildSectionNameRefs(test.parentRefs, routeNamespace, gwNsNames) + result, err := buildSectionNameRefs(test.parentRefs, routeNamespace, gws) g.Expect(result).To(Equal(test.expectedRefs)) if test.expectedError != nil { g.Expect(err).To(Equal(test.expectedError)) @@ -181,35 +198,46 @@ func TestFindGatewayForParentRef(t *testing.T) { Kind: helpers.GetPointer[gatewayv1.Kind]("NotGateway"), Name: gatewayv1.ObjectName(gwNsName2.Name), }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "wrong kind", + expectedFound: false, + name: "wrong kind", }, { ref: gatewayv1.ParentReference{ Group: helpers.GetPointer[gatewayv1.Group]("wrong-group"), Name: gatewayv1.ObjectName(gwNsName2.Name), }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "wrong group", + expectedFound: false, + name: "wrong group", }, { ref: gatewayv1.ParentReference{ Namespace: helpers.GetPointer(gatewayv1.Namespace(gwNsName1.Namespace)), Name: "some-gateway", }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "not found", + expectedFound: false, + name: "not found", }, } routeNamespace := "test-2" - gwNsNames := []types.NamespacedName{ - gwNsName1, - gwNsName2, + gws := map[types.NamespacedName]*Gateway{ + gwNsName1: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName1.Name, + Namespace: gwNsName1.Namespace, + }, + }, + }, + gwNsName2: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName2.Name, + Namespace: gwNsName2.Namespace, + }, + }, + }, } for _, test := range tests { @@ -217,9 +245,13 @@ func TestFindGatewayForParentRef(t *testing.T) { t.Parallel() g := NewWithT(t) - gw, found := findGatewayForParentRef(test.ref, routeNamespace, gwNsNames) - 
g.Expect(found).To(Equal(test.expectedFound)) - g.Expect(gw).To(Equal(test.expectedGwNsName)) + gw := findGatewayForParentRef(test.ref, routeNamespace, gws) + if test.expectedFound { + g.Expect(gw).ToNot(BeNil()) + g.Expect(client.ObjectKeyFromObject(gw.Source)).To(Equal(test.expectedGwNsName)) + } else { + g.Expect(gw).To(BeNil()) + } }) } } @@ -229,6 +261,10 @@ func TestBindRouteToListeners(t *testing.T) { createListener := func(name string) *Listener { return &Listener{ Name: name, + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Source: gatewayv1.Listener{ Name: gatewayv1.SectionName(name), Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("foo.example.com")), @@ -316,7 +352,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -336,7 +372,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -349,7 +385,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -363,7 +399,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNilSectionName.Spec.ParentRefs[0].SectionName, }, }, @@ -376,7 +412,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: 
&ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, }, }, @@ -389,7 +425,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNonExistingListener.Spec.ParentRefs[0].SectionName, }, }, @@ -402,26 +438,12 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithPort.Spec.ParentRefs[0].SectionName, Port: hrWithPort.Spec.ParentRefs[0].Port, }, }, } - ignoredGwNsName := types.NamespacedName{Namespace: "test", Name: "ignored-gateway"} - routeWithIgnoredGateway := &L7Route{ - RouteType: RouteTypeHTTP, - Source: hr, - Valid: true, - Attachable: true, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: ignoredGwNsName, - SectionName: hr.Spec.ParentRefs[0].SectionName, - }, - }, - } invalidRoute := &L7Route{ RouteType: RouteTypeHTTP, Valid: false, @@ -429,7 +451,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -443,6 +465,19 @@ func TestBindRouteToListeners(t *testing.T) { l.Source.Hostname = helpers.GetPointer[gatewayv1.Hostname]("bar.example.com") }) + routeWithInvalidBackendRefs := createNormalHTTPRoute(gw) + routeWithInvalidBackendRefs.Spec.Rules = []RouteRule{ + { + BackendRefs: []BackendRef{ + { + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + client.ObjectKeyFromObject(gw): {Message: "invalid backend"}, + }, + }, + }, + }, + } + createGRPCRouteWithSectionNameAndPort := func( sectionName *gatewayv1.SectionName, port 
*gatewayv1.PortNumber, @@ -487,7 +522,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -519,12 +554,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -550,12 +588,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNilSectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -582,13 +623,19 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80": {"foo.example.com"}, - "listener-8080": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80", + ): {"foo.example.com"}, + 
CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-8080", + ): {"foo.example.com"}, }, }, }, @@ -619,11 +666,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidListener(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidListener()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -645,13 +692,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithPort.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteUnsupportedValue( - `spec.parentRefs[0].port: Forbidden: cannot be set`, - ), + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedValue( + `spec.parentRefs[0].port: Forbidden: cannot be set`, + ), + }, AcceptedHostnames: map[string][]string{}, }, Port: hrWithPort.Spec.ParentRefs[0].Port, @@ -674,11 +723,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNonExistingListener.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -700,11 +749,11 @@ func TestBindRouteToListeners(t *testing.T) { 
expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidListener(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidListener()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -726,11 +775,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNoMatchingListenerHostname(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingListenerHostname()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -740,32 +789,6 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "no matching listener hostname", }, - { - route: routeWithIgnoredGateway, - gateway: &Gateway{ - Source: gw, - Valid: true, - Listeners: []*Listener{ - createListener("listener-80-1"), - }, - }, - expectedSectionNameRefs: []ParentRef{ - { - Idx: 0, - Gateway: ignoredGwNsName, - SectionName: hr.Spec.ParentRefs[0].SectionName, - Attachment: &ParentRefAttachmentStatus{ - Attached: false, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - AcceptedHostnames: map[string][]string{}, - }, - }, - }, - expectedGatewayListeners: []*Listener{ - createListener("listener-80-1"), - }, - name: "gateway is ignored", - }, { route: invalidRoute, gateway: &Gateway{ @@ -778,7 +801,7 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, 
Attachment: nil, SectionName: hr.Spec.ParentRefs[0].SectionName, }, @@ -800,11 +823,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidGateway(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidGateway()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -828,12 +851,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -861,12 +887,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -894,12 +923,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ 
Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -935,11 +967,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -977,12 +1009,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1021,11 +1056,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwDiffNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwDiffNamespace)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -1059,12 +1094,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: 
client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1101,12 +1139,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwDiffNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwDiffNamespace)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1144,11 +1185,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: gr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -1165,6 +1206,55 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "grpc route not allowed when listener kind is HTTPRoute", }, + { + route: createNormalGRPCRoute(gw), + gateway: &Gateway{ + Source: gw, + Valid: true, + Listeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.SupportedKinds = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + 
l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(gr): getLastNormalGRPCRoute(), + } + }), + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(true), + }, + }, + expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw), + }, + SectionName: gr.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + Attached: false, + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedConfiguration( + `HTTP2 is disabled - cannot configure GRPCRoutes`, + ), + }, + AcceptedHostnames: map[string][]string{}, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.SupportedKinds = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(gr): getLastNormalGRPCRoute(), + } + }), + }, + name: "grpc route not allowed when HTTP2 is disabled", + }, { route: createNormalHTTPRoute(gw), gateway: &Gateway{ @@ -1186,12 +1276,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1210,6 +1303,43 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "http route allowed when listener kind is HTTPRoute", }, + { + route: routeWithInvalidBackendRefs, + gateway: &Gateway{ + Source: gw, + Valid: true, + Listeners: []*Listener{ + createListener("listener-80-1"), + }, + }, + 
expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hr.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: []conditions.Condition{ + {Message: "invalid backend"}, + }, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, + }, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(hr): routeWithInvalidBackendRefs, + } + }), + }, + name: "route still allowed if backendRef failure conditions exist", + }, } namespaces := map[types.NamespacedName]*v1.Namespace{ @@ -1220,6 +1350,7 @@ func TestBindRouteToListeners(t *testing.T) { }, }, } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) @@ -1490,6 +1621,10 @@ func TestBindL4RouteToListeners(t *testing.T) { createListener := func(name string) *Listener { return &Listener{ Name: name, + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Source: gatewayv1.Listener{ Name: gatewayv1.SectionName(name), Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("foo.example.com")), @@ -1568,7 +1703,7 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1587,7 +1722,7 @@ func TestBindL4RouteToListeners(t *testing.T) { noMatchingParentAttachment := ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } notAttachableRoute := &L4Route{ @@ 
-1599,7 +1734,7 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1613,27 +1748,19 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Port: helpers.GetPointer[gatewayv1.PortNumber](80), }, }, Attachable: true, } - routeReferencesWrongNamespace := &L4Route{ - Source: tr, - Spec: L4RouteSpec{ - Hostnames: tr.Spec.Hostnames, - }, - Valid: true, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), - SectionName: tr.Spec.ParentRefs[0].SectionName, - }, + + routeWithInvalidBackendRefs := createNormalRoute(gw) + routeWithInvalidBackendRefs.Spec.BackendRef = BackendRef{ + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + client.ObjectKeyFromObject(gw): {Message: "invalid backend"}, }, - Attachable: true, } tests := []struct { @@ -1649,6 +1776,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1656,12 +1787,15 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, 
}, }, }, @@ -1680,6 +1814,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1687,7 +1825,7 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1701,6 +1839,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-444"), }, @@ -1709,7 +1851,7 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &noMatchingParentAttachment, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -1723,6 +1865,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1731,16 +1877,15 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "UnsupportedValue", - Message: "spec.parentRefs[0].port: Forbidden: cannot be set", + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedValue( + `spec.parentRefs[0].port: Forbidden: cannot be set`, + ), }, Attached: false, }, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: 
&ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, Port: helpers.GetPointer[gatewayv1.PortNumber](80), }, @@ -1750,42 +1895,15 @@ func TestBindL4RouteToListeners(t *testing.T) { }, name: "port is not nil", }, - { - route: routeReferencesWrongNamespace, - gateway: &Gateway{ - Source: gw, - Valid: true, - Listeners: []*Listener{ - createListener("listener-443"), - }, - }, - expectedSectionNameRefs: []ParentRef{ - { - Attachment: &ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "GatewayIgnored", - Message: "The Gateway is ignored by the controller", - }, - Attached: false, - }, - SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), - Idx: 0, - }, - }, - expectedGatewayListeners: []*Listener{ - createListener("listener-443"), - }, - name: "ignored gateway", - }, { route: createNormalRoute(gw), gateway: &Gateway{ Source: gw, Valid: false, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1794,16 +1912,11 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "InvalidGateway", - Message: "Gateway is invalid", - }, - Attached: false, + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidGateway()}, + Attached: false, }, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -1817,8 +1930,13 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gwWrongNamespace, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "wrong", + Name: "gateway", + 
}, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { + l.GatewayName = client.ObjectKeyFromObject(gwWrongNamespace) l.Source.AllowedRoutes = &gatewayv1.AllowedRoutes{ Namespaces: &gatewayv1.RouteNamespaces{From: helpers.GetPointer( gatewayv1.FromNamespaces("Same"), @@ -1830,21 +1948,17 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwWrongNamespace)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "NotAllowedByListeners", - Message: "Route is not allowed by any listener", - }, + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, }, }, }, expectedGatewayListeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { + l.GatewayName = client.ObjectKeyFromObject(gwWrongNamespace) l.Source.AllowedRoutes = &gatewayv1.AllowedRoutes{ Namespaces: &gatewayv1.RouteNamespaces{From: helpers.GetPointer( gatewayv1.FromNamespaces("Same"), @@ -1859,6 +1973,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { l.Valid = false @@ -1868,11 +1986,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + 
CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, Attached: true, }, @@ -1886,11 +2007,14 @@ func TestBindL4RouteToListeners(t *testing.T) { r.ParentRefs = []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, Attached: true, }, @@ -1908,7 +2032,11 @@ func TestBindL4RouteToListeners(t *testing.T) { route: createNormalRoute(gw), gateway: &Gateway{ Source: gw, - Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { l.Source.Hostname = (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")) @@ -1918,11 +2046,11 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingListenerHostname(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingListenerHostname()}, }, }, }, @@ -1940,6 +2068,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1947,11 +2079,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: 
client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, }, @@ -1972,6 +2107,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1979,11 +2118,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, SectionName: helpers.GetPointer[gatewayv1.SectionName](""), @@ -2001,7 +2143,11 @@ func TestBindL4RouteToListeners(t *testing.T) { { route: createNormalRoute(gw), gateway: &Gateway{ - Source: gw, + Source: gw, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Valid: true, Listeners: []*Listener{}, }, @@ -2009,7 +2155,7 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &noMatchingParentAttachment, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -2022,7 +2168,11 @@ func TestBindL4RouteToListeners(t *testing.T) { }), gateway: &Gateway{ Source: gw, - Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, Listeners: []*Listener{ 
createListener("listener-443"), }, @@ -2030,11 +2180,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, SectionName: helpers.GetPointer[gatewayv1.SectionName]("listener-443"), @@ -2054,6 +2207,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { l.SupportedKinds = nil @@ -2063,10 +2220,10 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, }, SectionName: helpers.GetPointer[gatewayv1.SectionName]("listener-443"), }, @@ -2078,6 +2235,47 @@ func TestBindL4RouteToListeners(t *testing.T) { }, name: "route kind not allowed", }, + { + route: routeWithInvalidBackendRefs, + gateway: &Gateway{ + Source: gw, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Listeners: []*Listener{ + createListener("listener-443"), + }, + }, + expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tr.Spec.ParentRefs[0].SectionName, + 
Attachment: &ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: []conditions.Condition{ + {Message: "invalid backend"}, + }, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, + }, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-443", func(l *Listener) { + l.L4Routes = map[L4RouteKey]*L4Route{ + CreateRouteKeyL4(tr): routeWithInvalidBackendRefs, + } + }), + }, + name: "route still allowed if backendRef failure conditions exist", + }, } namespaces := map[types.NamespacedName]*v1.Namespace{ @@ -2133,7 +2331,6 @@ func TestBuildL4RoutesForGateways_NoGateways(t *testing.T) { g.Expect(buildL4RoutesForGateways( tlsRoutes, - nil, services, nil, refGrantResolver, @@ -2176,6 +2373,11 @@ func TestTryToAttachL4RouteToListeners_NoAttachableListeners(t *testing.T) { g.Expect(attachable).To(BeFalse()) } +type parentRef struct { + sectionName *gatewayv1.SectionName + gw types.NamespacedName +} + func TestIsolateL4Listeners(t *testing.T) { t.Parallel() gw := &gatewayv1.Gateway{ @@ -2185,12 +2387,26 @@ func TestIsolateL4Listeners(t *testing.T) { }, } + gw1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway1", + }, + } + createTLSRouteWithSectionNameAndPort := func( name string, - sectionName *gatewayv1.SectionName, + parentRef []parentRef, ns string, hostnames ...gatewayv1.Hostname, ) *v1alpha2.TLSRoute { + var parentRefs []gatewayv1.ParentReference + for _, p := range parentRef { + parentRefs = append(parentRefs, gatewayv1.ParentReference{ + Name: gatewayv1.ObjectName(p.gw.Name), + SectionName: p.sectionName, + }) + } return &v1alpha2.TLSRoute{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -2198,12 +2414,7 @@ func TestIsolateL4Listeners(t *testing.T) { }, Spec: v1alpha2.TLSRouteSpec{ CommonRouteSpec: gatewayv1.CommonRouteSpec{ - ParentRefs: []gatewayv1.ParentReference{ - { - Name: 
gatewayv1.ObjectName(gw.Name), - SectionName: sectionName, - }, - }, + ParentRefs: parentRefs, }, Hostnames: hostnames, }, @@ -2213,31 +2424,56 @@ func TestIsolateL4Listeners(t *testing.T) { routeHostnames := []gatewayv1.Hostname{"bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com"} tr1 := createTLSRouteWithSectionNameAndPort( "tr1", - helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + }, + }, "test", routeHostnames..., ) tr2 := createTLSRouteWithSectionNameAndPort( "tr2", - helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, "test", routeHostnames..., ) tr3 := createTLSRouteWithSectionNameAndPort( "tr3", - helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + }, + }, "test", routeHostnames..., ) tr4 := createTLSRouteWithSectionNameAndPort( "tr4", - helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + }, + }, "test", routeHostnames..., ) tr5 := createTLSRouteWithSectionNameAndPort( "tr5", - helpers.GetPointer[gatewayv1.SectionName]("no-match"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("no-match"), + }, + }, "test", routeHostnames..., ) @@ -2256,11 +2492,8 @@ func TestIsolateL4Listeners(t *testing.T) { }, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKey{ - Namespace: gw.Namespace, - Name: gw.Name, - }, + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: 
client.ObjectKeyFromObject(gw)}, SectionName: sectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: acceptedHostnames, @@ -2273,29 +2506,29 @@ func TestIsolateL4Listeners(t *testing.T) { } acceptedHostnamesEmptyHostname := map[string][]string{ - "empty-hostname": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { "bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesWildcardExample := map[string][]string{ - "wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesFooWildcardExample := map[string][]string{ - "foo-wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesAbcCom := map[string][]string{ - "abc-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { "abc.foo.example.com", }, } acceptedHostnamesNoMatch := map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, } routesHostnameIntersection := []*L4Route{ @@ -2336,22 +2569,42 @@ func TestIsolateL4Listeners(t *testing.T) { } listenerMapHostnameIntersection := map[string]hostPort{ - "empty-hostname": {hostname: "", port: 80}, - "wildcard-example-com": {hostname: "*.example.com", port: 80}, - "foo-wildcard-example-com": {hostname: "*.foo.example.com", port: 80}, - "abc-com": {hostname: "abc.foo.example.com", port: 80}, - "no-match": {hostname: "no-match.cafe.com", port: 80}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { + hostname: "", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { + hostname: "*.example.com", + port: 80, + gwNsName: 
client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { + hostname: "*.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { + hostname: "abc.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): { + hostname: "no-match.cafe.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, } expectedResultHostnameIntersection := map[string][]ParentRef{ "tr1": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "empty-hostname": {"bar.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): {"bar.com"}, }, Attached: true, ListenerPort: 80, @@ -2361,11 +2614,14 @@ func TestIsolateL4Listeners(t *testing.T) { "tr2": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr2.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "wildcard-example-com": {"*.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "wildcard-example-com", + ): {"*.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2375,11 +2631,14 @@ func TestIsolateL4Listeners(t *testing.T) { "tr3": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "foo-wildcard-example-com": {"*.foo.example.com"}, + 
CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "foo-wildcard-example-com", + ): {"*.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2389,11 +2648,11 @@ func TestIsolateL4Listeners(t *testing.T) { "tr4": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr4.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "abc-com": {"abc.foo.example.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): {"abc.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2403,11 +2662,11 @@ func TestIsolateL4Listeners(t *testing.T) { "tr5": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr5.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, }, Attached: true, ListenerPort: 80, @@ -2444,6 +2703,43 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_flavor": {"flavor.example.com"}, } + routeHostname := []gatewayv1.Hostname{"coffee.example.com", "flavor.example.com"} + acceptedHostanamesMultipleGateways := map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + } + tlsCoffeeRoute1 := createTLSRouteWithSectionNameAndPort( + "tls_coffee", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + + tlsFlavorRoute1 := createTLSRouteWithSectionNameAndPort( + "tls_flavor", + []parentRef{ + { + 
gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + tests := []struct { expectedResult map[string][]ParentRef listenerMap map[string]hostPort @@ -2482,15 +2778,15 @@ func TestIsolateL4Listeners(t *testing.T) { ), }, listenerMap: map[string]hostPort{ - "tls_coffee": {hostname: "coffee.example.com", port: 443}, - "tls_tea": {hostname: "tea.example.com", port: 443}, - "tls_flavor": {hostname: "flavor.example.com", port: 443}, + "tls_coffee,test,gateway": {hostname: "coffee.example.com", port: 443}, + "tls_tea,test,gateway": {hostname: "tea.example.com", port: 443}, + "tls_flavor,test,gateway": {hostname: "flavor.example.com", port: 443}, }, expectedResult: map[string][]ParentRef{ "tls_coffee": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2505,7 +2801,7 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_tea": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2520,7 +2816,7 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_flavor": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2534,6 +2830,137 @@ func TestIsolateL4Listeners(t *testing.T) { }, }, }, + { + name: "no listener isolation for routes with overlapping 
hostnames but different gateways", + routes: []*L4Route{ + { + Source: tlsCoffeeRoute1, + Spec: L4RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + }, + }, + { + Source: tlsFlavorRoute1, + Spec: L4RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + }, + }, + }, + listenerMap: map[string]hostPort{ + "wildcard-example-com,test,gateway": { + hostname: "*.example.com", + port: 443, + gwNsName: client.ObjectKeyFromObject(gw), + }, + "wildcard-example-com,test,gateway1": { + hostname: "*.example.com", + port: 443, + gwNsName: client.ObjectKeyFromObject(gw), + }, + }, + 
expectedResult: map[string][]ParentRef{ + "tls_coffee": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + }, + "tls_flavor": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + }, + }, + }, } for _, test := range tests { @@ -2560,12 +2987,26 @@ func TestIsolateL7Listeners(t *testing.T) { }, } + gw1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway1", + }, + } + 
createHTTPRouteWithSectionNameAndPort := func( name string, - sectionName *gatewayv1.SectionName, + parentRef []parentRef, ns string, hostnames ...gatewayv1.Hostname, ) *gatewayv1.HTTPRoute { + var parentRefs []gatewayv1.ParentReference + for _, p := range parentRef { + parentRefs = append(parentRefs, gatewayv1.ParentReference{ + Name: gatewayv1.ObjectName(p.gw.Name), + SectionName: p.sectionName, + }) + } return &gatewayv1.HTTPRoute{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -2573,12 +3014,7 @@ func TestIsolateL7Listeners(t *testing.T) { }, Spec: gatewayv1.HTTPRouteSpec{ CommonRouteSpec: gatewayv1.CommonRouteSpec{ - ParentRefs: []gatewayv1.ParentReference{ - { - Name: gatewayv1.ObjectName(gw.Name), - SectionName: sectionName, - }, - }, + ParentRefs: parentRefs, }, Hostnames: hostnames, }, @@ -2599,11 +3035,8 @@ func TestIsolateL7Listeners(t *testing.T) { }, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKey{ - Namespace: gw.Namespace, - Name: gw.Name, - }, + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: sectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: acceptedHostnames, @@ -2618,59 +3051,84 @@ func TestIsolateL7Listeners(t *testing.T) { routeHostnames := []gatewayv1.Hostname{"bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com"} hr1 := createHTTPRouteWithSectionNameAndPort( "hr1", - helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + }, + }, "test", routeHostnames..., ) hr2 := createHTTPRouteWithSectionNameAndPort( "hr2", - helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, "test", routeHostnames..., ) hr3 := createHTTPRouteWithSectionNameAndPort( 
"hr3", - helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + }, + }, "test", routeHostnames..., ) hr4 := createHTTPRouteWithSectionNameAndPort( "hr4", - helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + }, + }, "test", routeHostnames..., ) hr5 := createHTTPRouteWithSectionNameAndPort( "hr5", - helpers.GetPointer[gatewayv1.SectionName]("no-match"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("no-match"), + }, + }, "test", routeHostnames..., // no matching hostname ) acceptedHostnamesEmptyHostname := map[string][]string{ - "empty-hostname": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { "bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesWildcardExample := map[string][]string{ - "wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesFooWildcardExample := map[string][]string{ - "foo-wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesAbcCom := map[string][]string{ - "abc-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { "abc.foo.example.com", }, } acceptedHostnamesNoMatch := map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, } routesHostnameIntersection := []*L7Route{ @@ -2712,22 +3170,42 @@ func TestIsolateL7Listeners(t *testing.T) { } listenerMapHostnameIntersection := 
map[string]hostPort{ - "empty-hostname": {hostname: "", port: 80}, - "wildcard-example-com": {hostname: "*.example.com", port: 80}, - "foo-wildcard-example-com": {hostname: "*.foo.example.com", port: 80}, - "abc-com": {hostname: "abc.foo.example.com", port: 80}, - "no-match": {hostname: "no-match.cafe.com", port: 80}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { + hostname: "", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { + hostname: "*.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { + hostname: "abc.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): { + hostname: "no-match.cafe.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, } expectedResultHostnameIntersection := map[string][]ParentRef{ "hr1": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "empty-hostname": {"bar.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): {"bar.com"}, }, Attached: true, ListenerPort: 80, @@ -2737,11 +3215,14 @@ func TestIsolateL7Listeners(t *testing.T) { "hr2": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr2.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: 
map[string][]string{ - "wildcard-example-com": {"*.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "wildcard-example-com", + ): {"*.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2751,11 +3232,14 @@ func TestIsolateL7Listeners(t *testing.T) { "hr3": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "foo-wildcard-example-com": {"*.foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "foo-wildcard-example-com", + ): {"*.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2765,11 +3249,11 @@ func TestIsolateL7Listeners(t *testing.T) { "hr4": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr4.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "abc-com": {"abc.foo.example.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): {"abc.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2779,11 +3263,11 @@ func TestIsolateL7Listeners(t *testing.T) { "hr5": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr5.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, }, Attached: true, ListenerPort: 80, @@ -2795,7 +3279,12 @@ func TestIsolateL7Listeners(t *testing.T) { routeHostnameCafeExample := []gatewayv1.Hostname{"cafe.example.com"} httpListenerRoute := createHTTPRouteWithSectionNameAndPort( "hr_cafe", - helpers.GetPointer[gatewayv1.SectionName]("http"), + 
[]parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("http"), + }, + }, "test", routeHostnameCafeExample..., ) @@ -2834,6 +3323,45 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_flavor": {"flavor.example.com"}, } + routeHostname := []gatewayv1.Hostname{"cafe.example.com", "flavor.example.com"} + + acceptedHostNamesMultipleGateway := map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + } + + hrCoffeeRoute1 := createHTTPRouteWithSectionNameAndPort( + "hr_coffee", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + + hrFlavorRoute1 := createHTTPRouteWithSectionNameAndPort( + "hr_flavor", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + tests := []struct { expectedResult map[string][]ParentRef listenersMap map[string]hostPort @@ -2858,14 +3386,14 @@ func TestIsolateL7Listeners(t *testing.T) { ), }, listenersMap: map[string]hostPort{ - "http": {hostname: "cafe.example.com", port: 80}, - "http-different": {hostname: "cafe.example.com", port: 8080}, + "http,test,gateway": {hostname: "cafe.example.com", port: 80}, + "http-different,test,gateway": {hostname: "cafe.example.com", port: 8080}, }, expectedResult: map[string][]ParentRef{ "hr_cafe": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: 
httpListenerRoute.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ @@ -2904,15 +3432,15 @@ func TestIsolateL7Listeners(t *testing.T) { ), }, listenersMap: map[string]hostPort{ - "hr_coffee": {hostname: "coffee.example.com", port: 80}, - "hr_tea": {hostname: "tea.example.com", port: 80}, - "hr_flavor": {hostname: "flavor.example.com", port: 80}, + "hr_coffee,test,gateway": {hostname: "coffee.example.com", port: 80}, + "hr_tea,test,gateway": {hostname: "tea.example.com", port: 80}, + "hr_flavor,test,gateway": {hostname: "flavor.example.com", port: 80}, }, expectedResult: map[string][]ParentRef{ "hr_coffee": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2927,7 +3455,7 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_tea": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2942,7 +3470,7 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_flavor": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2956,6 +3484,137 @@ func TestIsolateL7Listeners(t *testing.T) { }, }, }, + { + name: "no listener isolation for routes with same hostname, associated with different gateways", + routes: []*L7Route{ + { + Source: hrCoffeeRoute1, + Spec: L7RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + 
SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + }, + }, + { + Source: hrFlavorRoute1, + Spec: L7RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + }, + }, + }, + listenersMap: map[string]hostPort{ + "wildcard-example-com,test,gateway": { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + "wildcard-example-com,test,gateway1": { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw1), + }, + }, + expectedResult: map[string][]ParentRef{ + "hr_coffee": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: 
map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrCoffeeRoute1.Spec.ParentRefs[1].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + }, + "hr_flavor": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + }, + }, + }, } for _, test := range tests { @@ -3018,3 +3677,12 @@ func TestRemoveHostnames(t *testing.T) { }) } } + +func TestBindRoutesToListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + g.Expect(func() { + bindRoutesToListeners(nil, nil, nil, nil) + }).ToNot(Panic()) +} diff --git a/internal/mode/static/state/graph/service.go b/internal/mode/static/state/graph/service.go index ad6fb817ef..7a41b07132 100644 --- a/internal/mode/static/state/graph/service.go +++ b/internal/mode/static/state/graph/service.go @@ -5,10 +5,12 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/client" ) -// A ReferencedService represents a Kubernetes Service that is referenced by a Route and that belongs to the -// winning Gateway. It does not contain the v1.Service object, because Services are resolved when building +// A ReferencedService represents a Kubernetes Service that is referenced by a Route and the Gateways it belongs to. +// It does not contain the v1.Service object, because Services are resolved when building // the dataplane.Configuration. type ReferencedService struct { + // GatewayNsNames are all the Gateways that this Service indirectly attaches to through a Route. + GatewayNsNames map[types.NamespacedName]struct{} // Policies is a list of NGF Policies that target this Service. Policies []*Policy } @@ -16,72 +18,43 @@ type ReferencedService struct { func buildReferencedServices( l7routes map[RouteKey]*L7Route, l4Routes map[L4RouteKey]*L4Route, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*ReferencedService { - if gw == nil { - return nil - } - referencedServices := make(map[types.NamespacedName]*ReferencedService) - - belongsToWinningGw := func(refs []ParentRef) bool { - for _, ref := range refs { - if ref.Gateway == client.ObjectKeyFromObject(gw.Source) { - return true - } + for gwNsName, gw := range gws { + if gw == nil { + continue } - return false - } - - // Processes both valid and invalid BackendRefs as invalid ones still have referenced services - // we may want to track. 
- addServicesForL7Routes := func(routeRules []RouteRule) { - for _, rule := range routeRules { - for _, ref := range rule.BackendRefs { - if ref.SvcNsName != (types.NamespacedName{}) { - referencedServices[ref.SvcNsName] = &ReferencedService{ - Policies: nil, - } + belongsToGw := func(refs []ParentRef) bool { + for _, ref := range refs { + if ref.Gateway.NamespacedName == client.ObjectKeyFromObject(gw.Source) { + return true } } + return false } - } - addServicesForL4Routes := func(route *L4Route) { - nsname := route.Spec.BackendRef.SvcNsName - if nsname != (types.NamespacedName{}) { - referencedServices[nsname] = &ReferencedService{ - Policies: nil, + // routes all have populated ParentRefs from when they were created. + // + // Get all the service names referenced from all the l7 and l4 routes. + for _, route := range l7routes { + if !route.Valid || !belongsToGw(route.ParentRefs) { + continue } - } - } - - // routes all have populated ParentRefs from when they were created. - // - // Get all the service names referenced from all the l7 and l4 routes. - for _, route := range l7routes { - if !route.Valid { - continue - } - if !belongsToWinningGw(route.ParentRefs) { - continue + // Processes both valid and invalid BackendRefs as invalid ones still have referenced services + // we may want to track. 
+ addServicesAndGatewayForL7Routes(route.Spec.Rules, gwNsName, referencedServices) } - addServicesForL7Routes(route.Spec.Rules) - } - - for _, route := range l4Routes { - if !route.Valid { - continue - } + for _, route := range l4Routes { + if !route.Valid || !belongsToGw(route.ParentRefs) { + continue + } - if !belongsToWinningGw(route.ParentRefs) { - continue + addServicesAndGatewayForL4Routes(route, gwNsName, referencedServices) } - - addServicesForL4Routes(route) } if len(referencedServices) == 0 { @@ -90,3 +63,41 @@ func buildReferencedServices( return referencedServices } + +func addServicesAndGatewayForL4Routes( + route *L4Route, + gwNsName types.NamespacedName, + referencedServices map[types.NamespacedName]*ReferencedService, +) { + nsname := route.Spec.BackendRef.SvcNsName + if nsname != (types.NamespacedName{}) { + if _, ok := referencedServices[nsname]; !ok { + referencedServices[nsname] = &ReferencedService{ + Policies: nil, + GatewayNsNames: make(map[types.NamespacedName]struct{}), + } + } + referencedServices[nsname].GatewayNsNames[gwNsName] = struct{}{} + } +} + +func addServicesAndGatewayForL7Routes( + routeRules []RouteRule, + gwNsName types.NamespacedName, + referencedServices map[types.NamespacedName]*ReferencedService, +) { + for _, rule := range routeRules { + for _, ref := range rule.BackendRefs { + if ref.SvcNsName != (types.NamespacedName{}) { + if _, ok := referencedServices[ref.SvcNsName]; !ok { + referencedServices[ref.SvcNsName] = &ReferencedService{ + Policies: nil, + GatewayNsNames: make(map[types.NamespacedName]struct{}), + } + } + + referencedServices[ref.SvcNsName].GatewayNsNames[gwNsName] = struct{}{} + } + } + } +} diff --git a/internal/mode/static/state/graph/service_test.go b/internal/mode/static/state/graph/service_test.go index 0fa316e73f..e0ef7180ce 100644 --- a/internal/mode/static/state/graph/service_test.go +++ b/internal/mode/static/state/graph/service_test.go @@ -12,25 +12,49 @@ import ( func 
TestBuildReferencedServices(t *testing.T) { t.Parallel() - gwNsname := types.NamespacedName{Namespace: "test", Name: "gwNsname"} - gw := &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: gwNsname.Namespace, - Name: gwNsname.Name, + gwNsName := types.NamespacedName{Namespace: "test", Name: "gwNsname"} + gw2NsName := types.NamespacedName{Namespace: "test", Name: "gw2Nsname"} + gw3NsName := types.NamespacedName{Namespace: "test", Name: "gw3Nsname"} + gw := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gwNsName.Namespace, + Name: gwNsName.Name, + }, + }, + }, + gw2NsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gw2NsName.Namespace, + Name: gw2NsName.Name, + }, + }, + }, + gw3NsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gw3NsName.Namespace, + Name: gw3NsName.Name, + }, }, }, } - ignoredGw := types.NamespacedName{Namespace: "test", Name: "ignoredGw"} + + parentRefs := []ParentRef{ + { + Gateway: &ParentRefGateway{NamespacedName: gwNsName}, + }, + { + Gateway: &ParentRefGateway{NamespacedName: gw2NsName}, + }, + } getNormalL7Route := func() *L7Route { return &L7Route{ - ParentRefs: []ParentRef{ - { - Gateway: gwNsname, - }, - }, - Valid: true, + ParentRefs: parentRefs, + Valid: true, Spec: L7RouteSpec{ Rules: []RouteRule{ { @@ -57,12 +81,8 @@ func TestBuildReferencedServices(t *testing.T) { SvcNsName: types.NamespacedName{Namespace: "tlsroute-ns", Name: "service"}, }, }, - Valid: true, - ParentRefs: []ParentRef{ - { - Gateway: gwNsname, - }, - }, + Valid: true, + ParentRefs: parentRefs, } } @@ -137,56 +157,16 @@ func TestBuildReferencedServices(t *testing.T) { return route }) - normalL4RouteWinningAndIgnoredGws := getModifiedL4Route(func(route *L4Route) *L4Route { - route.ParentRefs = []ParentRef{ - { - Gateway: ignoredGw, - }, - { - Gateway: ignoredGw, - }, - { - Gateway: gwNsname, - }, - } - return route - 
}) - - normalRouteWinningAndIgnoredGws := getModifiedL7Route(func(route *L7Route) *L7Route { - route.ParentRefs = []ParentRef{ - { - Gateway: ignoredGw, - }, - { - Gateway: gwNsname, - }, - { - Gateway: ignoredGw, - }, - } - return route - }) - - normalL4RouteIgnoredGw := getModifiedL4Route(func(route *L4Route) *L4Route { - route.ParentRefs[0].Gateway = ignoredGw - return route - }) - - normalL7RouteIgnoredGw := getModifiedL7Route(func(route *L7Route) *L7Route { - route.ParentRefs[0].Gateway = ignoredGw - return route - }) - tests := []struct { l7Routes map[RouteKey]*L7Route l4Routes map[L4RouteKey]*L4Route exp map[types.NamespacedName]*ReferencedService - gw *Gateway + gws map[types.NamespacedName]*Gateway name string }{ { name: "normal routes", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, }, @@ -194,35 +174,65 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "banana-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "l7 route with two services in one Rule", // l4 routes don't support multiple services right now - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "two-svc-one-rule"}}: validRouteTwoServicesOneRule, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: 
"service2"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "route with one service per rule", // l4 routes don't support multiple rules right now - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "multiple valid routes with same services", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, {NamespacedName: types.NamespacedName{Name: "two-svc-one-rule"}}: validRouteTwoServicesOneRule, @@ -233,57 +243,35 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "l4-route-same-svc-as-l7-route"}}: normalL4RouteWithSameSvcAsL7Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service2"}: {}, - }, - }, - { - 
name: "valid routes that do not belong to winning gateway", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gws"}}: normalL7RouteIgnoredGw, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gw"}}: normalL4RouteIgnoredGw, - }, - exp: nil, - }, - { - name: "valid routes that belong to both winning and ignored gateways", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gws"}}: normalRouteWinningAndIgnoredGws, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "ignored-gw"}}: normalL4RouteWinningAndIgnoredGws, - }, - exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, - }, - }, - { - name: "valid routes with different services", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, - {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, - }, - exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: 
"service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "invalid routes", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "invalid-route"}}: invalidRoute, }, @@ -294,7 +282,7 @@ func TestBuildReferencedServices(t *testing.T) { }, { name: "combination of valid and invalid routes", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, {NamespacedName: types.NamespacedName{Name: "invalid-route"}}: invalidRoute, @@ -304,13 +292,23 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "banana-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "valid route no service nsname", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "no-service-nsname"}}: validRouteNoServiceNsName, }, @@ -321,7 +319,9 @@ func TestBuildReferencedServices(t *testing.T) { }, { name: "nil gateway", - gw: nil, + gws: map[types.NamespacedName]*Gateway{ + gwNsName: nil, + }, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: 
types.NamespacedName{Name: "no-service-nsname"}}: validRouteNoServiceNsName, }, @@ -337,7 +337,7 @@ func TestBuildReferencedServices(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildReferencedServices(test.l7Routes, test.l4Routes, test.gw)).To(Equal(test.exp)) + g.Expect(buildReferencedServices(test.l7Routes, test.l4Routes, test.gws)).To(Equal(test.exp)) }) } } diff --git a/internal/mode/static/state/graph/tlsroute.go b/internal/mode/static/state/graph/tlsroute.go index 78b2378c36..8e471afad0 100644 --- a/internal/mode/static/state/graph/tlsroute.go +++ b/internal/mode/static/state/graph/tlsroute.go @@ -7,22 +7,20 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) func buildTLSRoute( gtr *v1alpha2.TLSRoute, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, refGrantResolver func(resource toResource) bool, ) *L4Route { r := &L4Route{ Source: gtr, } - sectionNameRefs, err := buildSectionNameRefs(gtr.Spec.ParentRefs, gtr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(gtr.Spec.ParentRefs, gtr.Namespace, gws) if err != nil { r.Valid = false @@ -54,14 +52,14 @@ func buildTLSRoute( return r } - br, cond := validateBackendRefTLSRoute(gtr, services, npCfg, refGrantResolver) + br, conds := validateBackendRefTLSRoute(gtr, services, r.ParentRefs, refGrantResolver) r.Spec.BackendRef = br r.Valid = true r.Attachable = true - if cond != nil { - r.Conditions = append(r.Conditions, *cond) + if len(conds) > 0 { + r.Conditions = append(r.Conditions, conds...) 
} return r @@ -70,9 +68,9 @@ func buildTLSRoute( func validateBackendRefTLSRoute( gtr *v1alpha2.TLSRoute, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, + parentRefs []ParentRef, refGrantResolver func(resource toResource) bool, -) (BackendRef, *conditions.Condition) { +) (BackendRef, []conditions.Condition) { // Length of BackendRefs and Rules is guaranteed to be one due to earlier check in buildTLSRoute refPath := field.NewPath("spec").Child("rules").Index(0).Child("backendRefs").Index(0) @@ -85,10 +83,11 @@ func validateBackendRefTLSRoute( refPath, ); !valid { backendRef := BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } - return backendRef, &cond + return backendRef, []conditions.Condition{cond} } ns := gtr.Namespace @@ -109,22 +108,25 @@ func validateBackendRefTLSRoute( ) backendRef := BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Valid: true, + SvcNsName: svcNsName, + ServicePort: svcPort, + Valid: true, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } if err != nil { backendRef.Valid = false - return backendRef, helpers.GetPointer(staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())) + return backendRef, []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())} } - if err := verifyIPFamily(npCfg, svcIPFamily); err != nil { - backendRef.Valid = false - - return backendRef, helpers.GetPointer(staticConds.NewRouteInvalidIPFamily(err.Error())) + var conds []conditions.Condition + for _, parentRef := range parentRefs { + if err := verifyIPFamily(parentRef.Gateway.EffectiveNginxProxy, svcIPFamily); err != nil { + backendRef.Valid = backendRef.Valid || false + backendRef.InvalidForGateways[parentRef.Gateway.NamespacedName] = staticConds.NewRouteInvalidIPFamily(err.Error()) + } } - return backendRef, nil + return backendRef, conds } diff --git 
a/internal/mode/static/state/graph/tlsroute_test.go b/internal/mode/static/state/graph/tlsroute_test.go index 73cd8758a1..988013a594 100644 --- a/internal/mode/static/state/graph/tlsroute_test.go +++ b/internal/mode/static/state/graph/tlsroute_test.go @@ -7,10 +7,11 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" @@ -44,13 +45,31 @@ func TestBuildTLSRoute(t *testing.T) { Name: "gateway", SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), } - gatewayNsName := types.NamespacedName{ - Namespace: "test", - Name: "gateway", + + createGateway := func() *Gateway { + return &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + } + } + + modGateway := func(gw *Gateway, mod func(*Gateway) *Gateway) *Gateway { + return mod(gw) } + parentRefGraph := ParentRef{ SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), - Gateway: gatewayNsName, + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, } duplicateParentRefsGtr := createTLSRoute( "hi.example.com", @@ -267,13 +286,12 @@ func TestBuildTLSRoute(t *testing.T) { alwaysFalseRefGrantResolver := func(_ toResource) bool { return false } tests := []struct { - expected *L4Route - gtr *v1alpha2.TLSRoute - services map[types.NamespacedName]*apiv1.Service - resolver func(resource toResource) bool - name string - gatewayNsNames 
[]types.NamespacedName - npCfg NginxProxy + expected *L4Route + gtr *v1alpha2.TLSRoute + services map[types.NamespacedName]*apiv1.Service + resolver func(resource toResource) bool + gateway *Gateway + name string }{ { gtr: duplicateParentRefsGtr, @@ -281,18 +299,18 @@ func TestBuildTLSRoute(t *testing.T) { Source: duplicateParentRefsGtr, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "duplicate parent refs", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "duplicate parent refs", }, { - gtr: noParentRefsGtr, - expected: nil, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "no parent refs", + gtr: noParentRefsGtr, + expected: nil, + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "no parent refs", }, { gtr: invalidHostnameGtr, @@ -308,10 +326,10 @@ func TestBuildTLSRoute(t *testing.T) { )}, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "invalid hostname", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "invalid hostname", }, { gtr: noRulesGtr, @@ -328,10 +346,10 @@ func TestBuildTLSRoute(t *testing.T) { )}, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "invalid rule", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "invalid rule", }, { gtr: 
backedRefDNEGtr, @@ -347,7 +365,8 @@ func TestBuildTLSRoute(t *testing.T) { Namespace: "test", Name: "hi", }, - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound( @@ -356,10 +375,10 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "BackendRef not found", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "BackendRef not found", }, { gtr: wrongBackendRefGroupGtr, @@ -371,7 +390,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefInvalidKind( @@ -381,7 +401,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: createSvc("hi", 80), }, @@ -398,7 +418,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefInvalidKind( @@ -408,7 +429,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: createSvc("hi", 80), }, @@ -425,7 +446,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: 
false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefRefNotPermitted( @@ -435,7 +457,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: diffNsSvc, }, @@ -452,7 +474,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefUnsupportedValue( @@ -461,7 +484,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: createSvc("hi", 80), }, @@ -471,8 +494,19 @@ func TestBuildTLSRoute(t *testing.T) { { gtr: ipFamilyMismatchGtr, expected: &L4Route{ - Source: ipFamilyMismatchGtr, - ParentRefs: []ParentRef{parentRefGraph}, + Source: ipFamilyMismatchGtr, + ParentRefs: []ParentRef{ + { + SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + EffectiveNginxProxy: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, + }, + }, + }, Spec: L4RouteSpec{ Hostnames: []gatewayv1.Hostname{ "app.example.com", @@ -480,22 +514,24 @@ func TestBuildTLSRoute(t *testing.T) { BackendRef: BackendRef{ SvcNsName: svcNsName, ServicePort: apiv1.ServicePort{Port: 80}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: staticConds.NewRouteInvalidIPFamily( + "service configured with IPv4 family but NginxProxy is configured with IPv6", + ), + }, + Valid: 
true, }, }, - Conditions: []conditions.Condition{staticConds.NewRouteInvalidIPFamily( - "service configured with IPv4 family but NginxProxy is configured with IPv6", - )}, Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: modGateway(createGateway(), func(gw *Gateway) *Gateway { + gw.EffectiveNginxProxy = &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)} + return gw + }), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: ipv4Svc, }, - npCfg: NginxProxy{ - Source: &ngfAPI.NginxProxy{Spec: ngfAPI.NginxProxySpec{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}}, - Valid: true, - }, resolver: alwaysTrueRefGrantResolver, name: "service and npcfg ip family mismatch", }, @@ -509,15 +545,16 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - SvcNsName: diffSvcNsName, - ServicePort: apiv1.ServicePort{Port: 80}, - Valid: true, + SvcNsName: diffSvcNsName, + ServicePort: apiv1.ServicePort{Port: 80}, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: diffNsSvc, }, @@ -534,15 +571,16 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - SvcNsName: svcNsName, - ServicePort: apiv1.ServicePort{Port: 80}, - Valid: true, + SvcNsName: svcNsName, + ServicePort: apiv1.ServicePort{Port: 80}, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: ipv4Svc, }, @@ -558,9 +596,8 @@ func TestBuildTLSRoute(t *testing.T) { r := buildTLSRoute( test.gtr, - test.gatewayNsNames, + 
map[types.NamespacedName]*Gateway{client.ObjectKeyFromObject(test.gateway.Source): test.gateway}, test.services, - &test.npCfg, test.resolver, ) g.Expect(helpers.Diff(test.expected, r)).To(BeEmpty()) diff --git a/internal/mode/static/state/statefakes/fake_change_processor.go b/internal/mode/static/state/statefakes/fake_change_processor.go index b3de756b60..c88a31ce01 100644 --- a/internal/mode/static/state/statefakes/fake_change_processor.go +++ b/internal/mode/static/state/statefakes/fake_change_processor.go @@ -33,17 +33,15 @@ type FakeChangeProcessor struct { getLatestGraphReturnsOnCall map[int]struct { result1 *graph.Graph } - ProcessStub func() (state.ChangeType, *graph.Graph) + ProcessStub func() *graph.Graph processMutex sync.RWMutex processArgsForCall []struct { } processReturns struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph } processReturnsOnCall map[int]struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph } invocations map[string][][]interface{} invocationsMutex sync.RWMutex @@ -167,7 +165,7 @@ func (fake *FakeChangeProcessor) GetLatestGraphReturnsOnCall(i int, result1 *gra }{result1} } -func (fake *FakeChangeProcessor) Process() (state.ChangeType, *graph.Graph) { +func (fake *FakeChangeProcessor) Process() *graph.Graph { fake.processMutex.Lock() ret, specificReturn := fake.processReturnsOnCall[len(fake.processArgsForCall)] fake.processArgsForCall = append(fake.processArgsForCall, struct { @@ -180,9 +178,9 @@ func (fake *FakeChangeProcessor) Process() (state.ChangeType, *graph.Graph) { return stub() } if specificReturn { - return ret.result1, ret.result2 + return ret.result1 } - return fakeReturns.result1, fakeReturns.result2 + return fakeReturns.result1 } func (fake *FakeChangeProcessor) ProcessCallCount() int { @@ -191,36 +189,33 @@ func (fake *FakeChangeProcessor) ProcessCallCount() int { return len(fake.processArgsForCall) } -func (fake *FakeChangeProcessor) ProcessCalls(stub func() 
(state.ChangeType, *graph.Graph)) { +func (fake *FakeChangeProcessor) ProcessCalls(stub func() *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = stub } -func (fake *FakeChangeProcessor) ProcessReturns(result1 state.ChangeType, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturns(result1 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil fake.processReturns = struct { - result1 state.ChangeType - result2 *graph.Graph - }{result1, result2} + result1 *graph.Graph + }{result1} } -func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 state.ChangeType, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil if fake.processReturnsOnCall == nil { fake.processReturnsOnCall = make(map[int]struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph }) } fake.processReturnsOnCall[i] = struct { - result1 state.ChangeType - result2 *graph.Graph - }{result1, result2} + result1 *graph.Graph + }{result1} } func (fake *FakeChangeProcessor) Invocations() map[string][][]interface{} { diff --git a/internal/mode/static/state/store.go b/internal/mode/static/state/store.go index 58bf28216a..910f257c90 100644 --- a/internal/mode/static/state/store.go +++ b/internal/mode/static/state/store.go @@ -3,7 +3,6 @@ package state import ( "fmt" - discoveryV1 "k8s.io/api/discovery/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -188,7 +187,7 @@ type changeTrackingUpdater struct { extractGVK kinds.MustExtractGVK supportedGVKs gvkList - changeType ChangeType + changed bool } func newChangeTrackingUpdater( @@ -221,7 +220,6 @@ func newChangeTrackingUpdater( extractGVK: extractGVK, supportedGVKs: supportedGVKs, stateChangedPredicates: 
stateChangedPredicates, - changeType: NoChange, } } @@ -255,7 +253,7 @@ func (s *changeTrackingUpdater) Upsert(obj client.Object) { changingUpsert := s.upsert(obj) - s.setChangeType(obj, changingUpsert) + s.changed = s.changed || changingUpsert } func (s *changeTrackingUpdater) delete(objType ngftypes.ObjectType, nsname types.NamespacedName) (changed bool) { @@ -282,28 +280,13 @@ func (s *changeTrackingUpdater) Delete(objType ngftypes.ObjectType, nsname types changingDelete := s.delete(objType, nsname) - s.setChangeType(objType, changingDelete) + s.changed = s.changed || changingDelete } -// getAndResetChangedStatus returns the type of change that occurred based on the previous updates (Upserts/Deletes). -// It also resets the changed status to NoChange. -func (s *changeTrackingUpdater) getAndResetChangedStatus() ChangeType { - changeType := s.changeType - s.changeType = NoChange - return changeType -} - -// setChangeType determines and sets the type of change that occurred. -// - if no change occurred on this object, then keep the changeType as-is (could've been set by another object event) -// - if changeType is already a ClusterStateChange, then we don't need to update the value -// - otherwise, if we are processing an Endpoint update, then this is an EndpointsOnlyChange changeType -// - otherwise, this is a different object, and is a ClusterStateChange changeType. -func (s *changeTrackingUpdater) setChangeType(obj client.Object, changed bool) { - if changed && s.changeType != ClusterStateChange { - if _, ok := obj.(*discoveryV1.EndpointSlice); ok { - s.changeType = EndpointsOnlyChange - } else { - s.changeType = ClusterStateChange - } - } +// getAndResetChangedStatus returns if a change occurred based on the previous updates (Upserts/Deletes). +// It also resets the changed status to false. 
+func (s *changeTrackingUpdater) getAndResetChangedStatus() bool { + changed := s.changed + s.changed = false + return changed } diff --git a/internal/mode/static/state/store_test.go b/internal/mode/static/state/store_test.go deleted file mode 100644 index 54e60264fa..0000000000 --- a/internal/mode/static/state/store_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package state - -import ( - "testing" - - . "github.com/onsi/gomega" - discoveryV1 "k8s.io/api/discovery/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - v1 "sigs.k8s.io/gateway-api/apis/v1" -) - -//nolint:paralleltest,tparallel // Order matters for these tests. -func TestSetChangeType(t *testing.T) { - t.Parallel() - ctu := newChangeTrackingUpdater(nil, nil) - - // Order matters for these cases. - tests := []struct { - obj client.Object - name string - exp ChangeType - changed bool - }{ - { - name: "no change", - exp: NoChange, - }, - { - name: "endpoint object", - obj: &discoveryV1.EndpointSlice{}, - changed: true, - exp: EndpointsOnlyChange, - }, - { - name: "non-endpoint object", - obj: &v1.HTTPRoute{}, - changed: true, - exp: ClusterStateChange, - }, - { - name: "changeType was previously set to ClusterStateChange", - obj: &discoveryV1.EndpointSlice{}, - changed: true, - exp: ClusterStateChange, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - ctu.setChangeType(test.obj, test.changed) - g.Expect(ctu.changeType).To(Equal(test.exp)) - }) - } -} diff --git a/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go b/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go index 4460ec36e3..59883a9fc7 100644 --- a/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go +++ b/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go @@ -22,11 +22,10 @@ type FakePolicyValidator struct { conflictsReturnsOnCall map[int]struct { result1 bool } - ValidateStub 
func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + ValidateStub func(policies.Policy) []conditions.Condition validateMutex sync.RWMutex validateArgsForCall []struct { arg1 policies.Policy - arg2 *policies.GlobalSettings } validateReturns struct { result1 []conditions.Condition @@ -34,6 +33,18 @@ type FakePolicyValidator struct { validateReturnsOnCall map[int]struct { result1 []conditions.Condition } + ValidateGlobalSettingsStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + validateGlobalSettingsMutex sync.RWMutex + validateGlobalSettingsArgsForCall []struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + } + validateGlobalSettingsReturns struct { + result1 []conditions.Condition + } + validateGlobalSettingsReturnsOnCall map[int]struct { + result1 []conditions.Condition + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -100,19 +111,18 @@ func (fake *FakePolicyValidator) ConflictsReturnsOnCall(i int, result1 bool) { }{result1} } -func (fake *FakePolicyValidator) Validate(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { +func (fake *FakePolicyValidator) Validate(arg1 policies.Policy) []conditions.Condition { fake.validateMutex.Lock() ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] fake.validateArgsForCall = append(fake.validateArgsForCall, struct { arg1 policies.Policy - arg2 *policies.GlobalSettings - }{arg1, arg2}) + }{arg1}) stub := fake.ValidateStub fakeReturns := fake.validateReturns - fake.recordInvocation("Validate", []interface{}{arg1, arg2}) + fake.recordInvocation("Validate", []interface{}{arg1}) fake.validateMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1) } if specificReturn { return ret.result1 @@ -126,17 +136,17 @@ func (fake *FakePolicyValidator) ValidateCallCount() int { return len(fake.validateArgsForCall) } -func (fake *FakePolicyValidator) ValidateCalls(stub 
func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { +func (fake *FakePolicyValidator) ValidateCalls(stub func(policies.Policy) []conditions.Condition) { fake.validateMutex.Lock() defer fake.validateMutex.Unlock() fake.ValidateStub = stub } -func (fake *FakePolicyValidator) ValidateArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { +func (fake *FakePolicyValidator) ValidateArgsForCall(i int) policies.Policy { fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() argsForCall := fake.validateArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakePolicyValidator) ValidateReturns(result1 []conditions.Condition) { @@ -162,6 +172,68 @@ func (fake *FakePolicyValidator) ValidateReturnsOnCall(i int, result1 []conditio }{result1} } +func (fake *FakePolicyValidator) ValidateGlobalSettings(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { + fake.validateGlobalSettingsMutex.Lock() + ret, specificReturn := fake.validateGlobalSettingsReturnsOnCall[len(fake.validateGlobalSettingsArgsForCall)] + fake.validateGlobalSettingsArgsForCall = append(fake.validateGlobalSettingsArgsForCall, struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + }{arg1, arg2}) + stub := fake.ValidateGlobalSettingsStub + fakeReturns := fake.validateGlobalSettingsReturns + fake.recordInvocation("ValidateGlobalSettings", []interface{}{arg1, arg2}) + fake.validateGlobalSettingsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsCallCount() int { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + return len(fake.validateGlobalSettingsArgsForCall) +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { + 
fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = stub +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + argsForCall := fake.validateGlobalSettingsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsReturns(result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + fake.validateGlobalSettingsReturns = struct { + result1 []conditions.Condition + }{result1} +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsReturnsOnCall(i int, result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + if fake.validateGlobalSettingsReturnsOnCall == nil { + fake.validateGlobalSettingsReturnsOnCall = make(map[int]struct { + result1 []conditions.Condition + }) + } + fake.validateGlobalSettingsReturnsOnCall[i] = struct { + result1 []conditions.Condition + }{result1} +} + func (fake *FakePolicyValidator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -169,6 +241,8 @@ func (fake *FakePolicyValidator) Invocations() map[string][][]interface{} { defer fake.conflictsMutex.RUnlock() fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/state/validation/validator.go b/internal/mode/static/state/validation/validator.go index 
26bf281b70..f012c3c6ec 100644 --- a/internal/mode/static/state/validation/validator.go +++ b/internal/mode/static/state/validation/validator.go @@ -55,7 +55,9 @@ type GenericValidator interface { //counterfeiter:generate . PolicyValidator type PolicyValidator interface { // Validate validates an NGF Policy. - Validate(policy policies.Policy, globalSettings *policies.GlobalSettings) []conditions.Condition + Validate(policy policies.Policy) []conditions.Condition + // ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. + ValidateGlobalSettings(policy policies.Policy, globalSettings *policies.GlobalSettings) []conditions.Condition // Conflicts returns true if the two Policies conflict. Conflicts(a, b policies.Policy) bool } diff --git a/internal/mode/static/status/prepare_requests.go b/internal/mode/static/status/prepare_requests.go index 818445d8d7..fc0cfe358a 100644 --- a/internal/mode/static/status/prepare_requests.go +++ b/internal/mode/static/status/prepare_requests.go @@ -19,18 +19,12 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) -// NginxReloadResult describes the result of an NGINX reload. -type NginxReloadResult struct { - // Error is the error that occurred during the reload. - Error error -} - // PrepareRouteRequests prepares status UpdateRequests for the given Routes. 
func PrepareRouteRequests( l4routes map[graph.L4RouteKey]*graph.L4Route, routes map[graph.RouteKey]*graph.L7Route, transitionTime metav1.Time, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, gatewayCtlrName string, ) []frameworkStatus.UpdateRequest { reqs := make([]frameworkStatus.UpdateRequest, 0, len(routes)) @@ -107,7 +101,7 @@ func prepareRouteStatus( gatewayCtlrName string, parentRefs []graph.ParentRef, conds []conditions.Condition, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, transitionTime metav1.Time, srcGeneration int64, ) v1.RouteStatus { @@ -117,8 +111,8 @@ func prepareRouteStatus( for _, ref := range parentRefs { failedAttachmentCondCount := 0 - if ref.Attachment != nil && !ref.Attachment.Attached { - failedAttachmentCondCount = 1 + if ref.Attachment != nil { + failedAttachmentCondCount = len(ref.Attachment.FailedConditions) } allConds := make([]conditions.Condition, 0, len(conds)+len(defaultConds)+failedAttachmentCondCount) @@ -126,8 +120,8 @@ func prepareRouteStatus( // ensured by DeduplicateConditions. allConds = append(allConds, defaultConds...) allConds = append(allConds, conds...) - if failedAttachmentCondCount == 1 { - allConds = append(allConds, ref.Attachment.FailedCondition) + if failedAttachmentCondCount > 0 { + allConds = append(allConds, ref.Attachment.FailedConditions...) } if nginxReloadRes.Error != nil { @@ -142,8 +136,8 @@ func prepareRouteStatus( ps := v1.RouteParentStatus{ ParentRef: v1.ParentReference{ - Namespace: helpers.GetPointer(v1.Namespace(ref.Gateway.Namespace)), - Name: v1.ObjectName(ref.Gateway.Name), + Namespace: helpers.GetPointer(v1.Namespace(ref.Gateway.NamespacedName.Namespace)), + Name: v1.ObjectName(ref.Gateway.NamespacedName.Name), SectionName: ref.SectionName, }, ControllerName: v1.GatewayController(gatewayCtlrName), @@ -211,28 +205,16 @@ func PrepareGatewayClassRequests( // PrepareGatewayRequests prepares status UpdateRequests for the given Gateways. 
func PrepareGatewayRequests( gateway *graph.Gateway, - ignoredGateways map[types.NamespacedName]*v1.Gateway, transitionTime metav1.Time, gwAddresses []v1.GatewayStatusAddress, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, ) []frameworkStatus.UpdateRequest { - reqs := make([]frameworkStatus.UpdateRequest, 0, 1+len(ignoredGateways)) + reqs := make([]frameworkStatus.UpdateRequest, 0, 1) if gateway != nil { reqs = append(reqs, prepareGatewayRequest(gateway, transitionTime, gwAddresses, nginxReloadRes)) } - for nsname, gw := range ignoredGateways { - apiConds := conditions.ConvertConditions(staticConds.NewGatewayConflict(), gw.Generation, transitionTime) - reqs = append(reqs, frameworkStatus.UpdateRequest{ - NsName: nsname, - ResourceType: &v1.Gateway{}, - Setter: newGatewayStatusSetter(v1.GatewayStatus{ - Conditions: apiConds, - }), - }) - } - return reqs } @@ -240,7 +222,7 @@ func prepareGatewayRequest( gateway *graph.Gateway, transitionTime metav1.Time, gwAddresses []v1.GatewayStatusAddress, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, ) frameworkStatus.UpdateRequest { if !gateway.Valid { conds := conditions.ConvertConditions( @@ -272,9 +254,10 @@ func prepareGatewayRequest( } if nginxReloadRes.Error != nil { + msg := fmt.Sprintf("%s: %s", staticConds.ListenerMessageFailedNginxReload, nginxReloadRes.Error.Error()) conds = append( conds, - staticConds.NewListenerNotProgrammedInvalid(staticConds.ListenerMessageFailedNginxReload), + staticConds.NewListenerNotProgrammedInvalid(msg), ) } @@ -293,6 +276,8 @@ func prepareGatewayRequest( } gwConds := staticConds.NewDefaultGatewayConditions() + gwConds = append(gwConds, gateway.Conditions...) + if validListenerCount == 0 { gwConds = append(gwConds, staticConds.NewGatewayNotAcceptedListenersNotValid()...) 
} else if validListenerCount < len(gateway.Listeners) { @@ -300,9 +285,10 @@ func prepareGatewayRequest( } if nginxReloadRes.Error != nil { + msg := fmt.Sprintf("%s: %s", staticConds.GatewayMessageFailedNginxReload, nginxReloadRes.Error.Error()) gwConds = append( gwConds, - staticConds.NewGatewayNotProgrammedInvalid(staticConds.GatewayMessageFailedNginxReload), + staticConds.NewGatewayNotProgrammedInvalid(msg), ) } @@ -385,19 +371,24 @@ func PrepareBackendTLSPolicyRequests( conds := conditions.DeduplicateConditions(pol.Conditions) apiConds := conditions.ConvertConditions(conds, pol.Source.Generation, transitionTime) - status := v1alpha2.PolicyStatus{ - Ancestors: []v1alpha2.PolicyAncestorStatus{ - { - AncestorRef: v1.ParentReference{ - Namespace: (*v1.Namespace)(&pol.Gateway.Namespace), - Name: v1alpha2.ObjectName(pol.Gateway.Name), - Group: helpers.GetPointer[v1.Group](v1.GroupName), - Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), - }, - ControllerName: v1alpha2.GatewayController(gatewayCtlrName), - Conditions: apiConds, + policyAncestors := make([]v1alpha2.PolicyAncestorStatus, 0, len(pol.Gateways)) + for _, gwNsName := range pol.Gateways { + policyAncestorStatus := v1alpha2.PolicyAncestorStatus{ + AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer(v1.Namespace(gwNsName.Namespace)), + Name: v1.ObjectName(gwNsName.Name), + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), }, - }, + ControllerName: v1alpha2.GatewayController(gatewayCtlrName), + Conditions: apiConds, + } + + policyAncestors = append(policyAncestors, policyAncestorStatus) + } + + status := v1alpha2.PolicyStatus{ + Ancestors: policyAncestors, } reqs = append(reqs, frameworkStatus.UpdateRequest{ diff --git a/internal/mode/static/status/prepare_requests_test.go b/internal/mode/static/status/prepare_requests_test.go index 5d7eb9f2ea..fbc5ede98e 100644 --- a/internal/mode/static/status/prepare_requests_test.go +++ 
b/internal/mode/static/status/prepare_requests_test.go @@ -3,6 +3,7 @@ package status import ( "context" "errors" + "fmt" "testing" "github.com/go-logr/logr" @@ -70,6 +71,9 @@ var ( { SectionName: helpers.GetPointer[v1.SectionName]("listener-80-2"), }, + { + SectionName: helpers.GetPointer[v1.SectionName]("listener-80-3"), + }, }, } @@ -84,7 +88,7 @@ var ( parentRefsValid = []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, SectionName: commonRouteSpecValid.ParentRefs[0].SectionName, Attachment: &graph.ParentRefAttachmentStatus{ Attached: true, @@ -92,11 +96,20 @@ var ( }, { Idx: 1, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, SectionName: commonRouteSpecValid.ParentRefs[1].SectionName, Attachment: &graph.ParentRefAttachmentStatus{ - Attached: false, - FailedCondition: invalidAttachmentCondition, + Attached: false, + FailedConditions: []conditions.Condition{invalidAttachmentCondition}, + }, + }, + { + Idx: 2, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, + SectionName: commonRouteSpecValid.ParentRefs[2].SectionName, + Attachment: &graph.ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: []conditions.Condition{invalidAttachmentCondition}, }, }, } @@ -104,7 +117,7 @@ var ( parentRefsInvalid = []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, Attachment: nil, SectionName: commonRouteSpecInvalid.ParentRefs[0].SectionName, }, @@ -170,6 +183,38 @@ var ( }, }, }, + { + ParentRef: v1.ParentReference{ + Namespace: helpers.GetPointer(v1.Namespace(gwNsName.Namespace)), + Name: v1.ObjectName(gwNsName.Name), + SectionName: helpers.GetPointer[v1.SectionName]("listener-80-3"), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1.RouteConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + 
Reason: string(v1.RouteReasonAccepted), + Message: "The route is accepted", + }, + { + Type: string(v1.RouteConditionResolvedRefs), + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + Reason: string(v1.RouteReasonResolvedRefs), + Message: "All references are resolved", + }, + { + Type: invalidAttachmentCondition.Type, + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + }, + }, + }, }, } @@ -274,7 +319,7 @@ func TestBuildHTTPRouteStatuses(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -353,7 +398,7 @@ func TestBuildGRPCRouteStatuses(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -430,7 +475,7 @@ func TestBuildTLSRouteStatuses(t *testing.T) { routes, map[graph.RouteKey]*graph.L7Route{}, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -474,7 +519,7 @@ func TestBuildRouteStatusesNginxErr(t *testing.T) { ParentRefs: []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, Attachment: &graph.ParentRefAttachmentStatus{ Attached: true, }, @@ -534,7 +579,7 @@ func TestBuildRouteStatusesNginxErr(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{Error: errors.New("test error")}, + graph.NginxReloadResult{Error: errors.New("test error")}, gatewayCtlrName, ) @@ -740,77 +785,15 @@ func TestBuildGatewayStatuses(t *testing.T) { routeKey := graph.RouteKey{NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}} tests := []struct { - nginxReloadRes NginxReloadResult - gateway *graph.Gateway - ignoredGateways map[types.NamespacedName]*v1.Gateway - expected map[types.NamespacedName]v1.GatewayStatus - name string + nginxReloadRes 
graph.NginxReloadResult + gateway *graph.Gateway + expected map[types.NamespacedName]v1.GatewayStatus + name string }{ { name: "nil gateway and no ignored gateways", expected: map[types.NamespacedName]v1.GatewayStatus{}, }, - { - name: "nil gateway and ignored gateways", - ignoredGateways: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "ignored-1"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored-1", - Namespace: "test", - Generation: 1, - }, - }, - {Namespace: "test", Name: "ignored-2"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored-2", - Namespace: "test", - Generation: 2, - }, - }, - }, - expected: map[types.NamespacedName]v1.GatewayStatus{ - {Namespace: "test", Name: "ignored-1"}: { - Conditions: []metav1.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 1, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - { - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - ObservedGeneration: 1, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - }, - }, - {Namespace: "test", Name: "ignored-2"}: { - Conditions: []metav1.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 2, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - { - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - ObservedGeneration: 2, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - }, - }, - }, - }, { name: "valid gateway; all valid listeners", gateway: &graph.Gateway{ 
@@ -1087,7 +1070,7 @@ func TestBuildGatewayStatuses(t *testing.T) { ObservedGeneration: 2, LastTransitionTime: transitionTime, Reason: string(v1.GatewayReasonInvalid), - Message: staticConds.GatewayMessageFailedNginxReload, + Message: fmt.Sprintf("%s: test error", staticConds.GatewayMessageFailedNginxReload), }, }, Listeners: []v1.ListenerStatus{ @@ -1125,14 +1108,125 @@ func TestBuildGatewayStatuses(t *testing.T) { ObservedGeneration: 2, LastTransitionTime: transitionTime, Reason: string(v1.ListenerReasonInvalid), - Message: staticConds.ListenerMessageFailedNginxReload, + Message: fmt.Sprintf("%s: test error", staticConds.ListenerMessageFailedNginxReload), }, }, }, }, }, }, - nginxReloadRes: NginxReloadResult{Error: errors.New("test error")}, + nginxReloadRes: graph.NginxReloadResult{Error: errors.New("test error")}, + }, + { + name: "valid gateway with valid parametersRef; all valid listeners", + gateway: &graph.Gateway{ + Source: createGateway(), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayResolvedRefs(), + }, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAccepted), + Message: "Gateway is accepted", + }, + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonProgrammed), + Message: "Gateway is programmed", + }, + { + Type: string(staticConds.GatewayResolvedRefs), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: 
string(staticConds.GatewayReasonResolvedRefs), + Message: "ParametersRef resource is resolved", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, + }, + { + name: "valid gateway with invalid parametersRef; all valid listeners", + gateway: &graph.Gateway{ + Source: createGateway(), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters("ParametersRef not found"), + }, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonProgrammed), + Message: "Gateway is programmed", + }, + { + Type: string(staticConds.GatewayResolvedRefs), + Status: metav1.ConditionFalse, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(staticConds.GatewayReasonParamsRefNotFound), + Message: "ParametersRef resource could not be found", + }, + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonInvalidParameters), + Message: "Gateway is accepted, but ParametersRef is ignored due to an error: ParametersRef not found", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, }, } @@ -1152,17 +1246,10 @@ func TestBuildGatewayStatuses(t *testing.T) { expectedTotalReqs++ } - for _, gw := range test.ignoredGateways { - err := 
k8sClient.Create(context.Background(), gw) - g.Expect(err).ToNot(HaveOccurred()) - expectedTotalReqs++ - } - updater := statusFramework.NewUpdater(k8sClient, logr.Discard()) reqs := PrepareGatewayRequests( test.gateway, - test.ignoredGateways, transitionTime, addr, test.nginxReloadRes, @@ -1192,6 +1279,7 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { type policyCfg struct { Name string Conditions []conditions.Condition + Gateways []types.NamespacedName Valid bool Ignored bool IsReferenced bool @@ -1210,7 +1298,7 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { Ignored: policyCfg.Ignored, IsReferenced: policyCfg.IsReferenced, Conditions: policyCfg.Conditions, - Gateway: types.NamespacedName{Name: "gateway", Namespace: "test"}, + Gateways: policyCfg.Gateways, } } @@ -1222,12 +1310,19 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { Valid: true, IsReferenced: true, Conditions: attachedConds, + Gateways: []types.NamespacedName{ + {Namespace: "test", Name: "gateway"}, + {Namespace: "test", Name: "gateway-2"}, + }, } invalidPolicyCfg := policyCfg{ Name: "invalid-bt", IsReferenced: true, Conditions: invalidConds, + Gateways: []types.NamespacedName{ + {Namespace: "test", Name: "gateway"}, + }, } ignoredPolicyCfg := policyCfg{ @@ -1280,6 +1375,25 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { }, }, }, + { + AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer[v1.Namespace]("test"), + Name: "gateway-2", + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1alpha2.PolicyConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + LastTransitionTime: transitionTime, + Reason: string(v1alpha2.PolicyReasonAccepted), + Message: "Policy is accepted", + }, + }, + }, }, }, }, @@ -1358,6 +1472,25 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { }, }, }, + { + 
AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer[v1.Namespace]("test"), + Name: "gateway-2", + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1alpha2.PolicyConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + LastTransitionTime: transitionTime, + Reason: string(v1alpha2.PolicyReasonAccepted), + Message: "Policy is accepted", + }, + }, + }, }, }, }, diff --git a/internal/mode/static/status/queue.go b/internal/mode/static/status/queue.go new file mode 100644 index 0000000000..991718648b --- /dev/null +++ b/internal/mode/static/status/queue.go @@ -0,0 +1,81 @@ +package status + +import ( + "context" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// UpdateType is the type of status update to perform. +type UpdateType int + +const ( + // UpdateAll means to update statuses of all Gateway API resources. + UpdateAll = iota + // UpdateGateway means to just update the status of the Gateway resource. + UpdateGateway +) + +// QueueObject is the object to be passed to the queue for status updates. +type QueueObject struct { + // GatewayService is the Gateway Service that was updated. When set, UpdateType should be UpdateGateway. + // Set by the provisioner + GatewayService *corev1.Service + Error error + Deployment types.NamespacedName + UpdateType UpdateType +} + +// Queue represents a queue with unlimited size. +type Queue struct { + notifyCh chan struct{} + items []*QueueObject + + lock sync.Mutex +} + +// NewQueue returns a new Queue object. +func NewQueue() *Queue { + return &Queue{ + items: []*QueueObject{}, + notifyCh: make(chan struct{}, 1), + } +} + +// Enqueue adds an item to the queue and notifies any blocked readers. 
+func (q *Queue) Enqueue(item *QueueObject) { + q.lock.Lock() + defer q.lock.Unlock() + + q.items = append(q.items, item) + + select { + case q.notifyCh <- struct{}{}: + default: + } +} + +// Dequeue removes and returns the front item from the queue. +// It blocks if the queue is empty or when the context is canceled. +func (q *Queue) Dequeue(ctx context.Context) *QueueObject { + q.lock.Lock() + defer q.lock.Unlock() + + for len(q.items) == 0 { + q.lock.Unlock() + select { + case <-ctx.Done(): + q.lock.Lock() + return nil + case <-q.notifyCh: + q.lock.Lock() + } + } + + front := q.items[0] + q.items = q.items[1:] + + return front +} diff --git a/internal/mode/static/status/queue_test.go b/internal/mode/static/status/queue_test.go new file mode 100644 index 0000000000..8ed8bbb5ab --- /dev/null +++ b/internal/mode/static/status/queue_test.go @@ -0,0 +1,98 @@ +package status + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" +) + +func TestNewQueue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + + g.Expect(q).ToNot(BeNil()) + g.Expect(q.items).To(BeEmpty()) +} + +func TestEnqueue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + UpdateType: UpdateAll, + } + q.Enqueue(item) + + g.Expect(q.items).To(HaveLen(1)) + g.Expect(q.items[0]).To(Equal(item)) +} + +func TestDequeue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + UpdateType: UpdateAll, + } + q.Enqueue(item) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dequeuedItem := q.Dequeue(ctx) + g.Expect(dequeuedItem).To(Equal(item)) + g.Expect(q.items).To(BeEmpty()) +} + +func TestDequeueEmptyQueue(t *testing.T) { + t.Parallel() + g := 
NewWithT(t) + + q := NewQueue() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + dequeuedItem := q.Dequeue(ctx) + g.Expect(dequeuedItem).To(BeNil()) +} + +func TestDequeueWithMultipleItems(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item1 := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-1"}, + UpdateType: UpdateAll, + } + item2 := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-2"}, + UpdateType: UpdateAll, + } + q.Enqueue(item1) + q.Enqueue(item2) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dequeuedItem1 := q.Dequeue(ctx) + g.Expect(dequeuedItem1).To(Equal(item1)) + + dequeuedItem2 := q.Dequeue(ctx) + + g.Expect(dequeuedItem2).To(Equal(item2)) + g.Expect(q.items).To(BeEmpty()) +} diff --git a/internal/mode/static/telemetry/collector.go b/internal/mode/static/telemetry/collector.go index a349475a36..66a476c66b 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -34,7 +34,7 @@ type GraphGetter interface { // ConfigurationGetter gets the latest Configuration. type ConfigurationGetter interface { - GetLatestConfiguration() *dataplane.Configuration + GetLatestConfiguration() []*dataplane.Configuration } // Data is telemetry data. @@ -60,8 +60,10 @@ type Data struct { // then lastly by directive string. SnippetsFiltersDirectivesCount []int64 NGFResourceCounts // embedding is required by the generator. - // NGFReplicaCount is the number of replicas of the NGF Pod. - NGFReplicaCount int64 + // NginxPodCount is the total number of Nginx data plane Pods. + NginxPodCount int64 + // ControlPlanePodCount is the total number of NGF control plane Pods. + ControlPlanePodCount int64 } // NGFResourceCounts stores the counts of all relevant resources that NGF processes and generates configuration from. 
@@ -99,6 +101,8 @@ type NGFResourceCounts struct { SnippetsFilterCount int64 // UpstreamSettingsPolicyCount is the number of UpstreamSettingsPolicies. UpstreamSettingsPolicyCount int64 + // GatewayAttachedNpCount is the total number of NginxProxy resources that are attached to a Gateway. + GatewayAttachedNpCount int64 } // DataCollectorConfig holds configuration parameters for DataCollectorImpl. @@ -145,10 +149,7 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { return Data{}, fmt.Errorf("failed to collect cluster information: %w", err) } - graphResourceCount, err := collectGraphResourceCount(g, c.cfg.ConfigurationGetter) - if err != nil { - return Data{}, fmt.Errorf("failed to collect NGF resource counts: %w", err) - } + graphResourceCount := collectGraphResourceCount(g, c.cfg.ConfigurationGetter) replicaSet, err := getPodReplicaSet(ctx, c.cfg.K8sClientReader, c.cfg.PodNSName) if err != nil { @@ -167,6 +168,8 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { snippetsFiltersDirectives, snippetsFiltersDirectivesCount := collectSnippetsFilterDirectives(g) + nginxPodCount := getNginxPodCount(g) + data := Data{ Data: tel.Data{ ProjectName: "NGF", @@ -182,9 +185,10 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { ImageSource: c.cfg.ImageSource, FlagNames: c.cfg.Flags.Names, FlagValues: c.cfg.Flags.Values, - NGFReplicaCount: int64(replicaCount), SnippetsFiltersDirectives: snippetsFiltersDirectives, SnippetsFiltersDirectivesCount: snippetsFiltersDirectivesCount, + NginxPodCount: nginxPodCount, + ControlPlanePodCount: int64(replicaCount), } return data, nil @@ -193,23 +197,16 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { func collectGraphResourceCount( g *graph.Graph, configurationGetter ConfigurationGetter, -) (NGFResourceCounts, error) { +) NGFResourceCounts { ngfResourceCounts := NGFResourceCounts{} - cfg := configurationGetter.GetLatestConfiguration() - - if cfg == 
nil { - return ngfResourceCounts, errors.New("latest configuration cannot be nil") - } + configs := configurationGetter.GetLatestConfiguration() ngfResourceCounts.GatewayClassCount = int64(len(g.IgnoredGatewayClasses)) if g.GatewayClass != nil { ngfResourceCounts.GatewayClassCount++ } - ngfResourceCounts.GatewayCount = int64(len(g.IgnoredGateways)) - if g.Gateway != nil { - ngfResourceCounts.GatewayCount++ - } + ngfResourceCounts.GatewayCount = int64(len(g.Gateways)) routeCounts := computeRouteCount(g.Routes, g.L4Routes) ngfResourceCounts.HTTPRouteCount = routeCounts.HTTPRouteCount @@ -219,9 +216,11 @@ func collectGraphResourceCount( ngfResourceCounts.SecretCount = int64(len(g.ReferencedSecrets)) ngfResourceCounts.ServiceCount = int64(len(g.ReferencedServices)) - for _, upstream := range cfg.Upstreams { - if upstream.ErrorMsg == "" { - ngfResourceCounts.EndpointCount += int64(len(upstream.Endpoints)) + for _, cfg := range configs { + for _, upstream := range cfg.Upstreams { + if upstream.ErrorMsg == "" { + ngfResourceCounts.EndpointCount += int64(len(upstream.Endpoints)) + } } } @@ -246,13 +245,22 @@ func collectGraphResourceCount( } } - if g.NginxProxy != nil { - ngfResourceCounts.NginxProxyCount = 1 + ngfResourceCounts.NginxProxyCount = int64(len(g.ReferencedNginxProxies)) + ngfResourceCounts.SnippetsFilterCount = int64(len(g.SnippetsFilters)) + + var gatewayAttachedNPCount int64 + if g.GatewayClass != nil && g.GatewayClass.NginxProxy != nil { + gatewayClassNP := g.GatewayClass.NginxProxy + for _, np := range g.ReferencedNginxProxies { + if np != gatewayClassNP { + gatewayAttachedNPCount++ + } + } } - ngfResourceCounts.SnippetsFilterCount = int64(len(g.SnippetsFilters)) + ngfResourceCounts.GatewayAttachedNpCount = gatewayAttachedNPCount - return ngfResourceCounts, nil + return ngfResourceCounts } type RouteCounts struct { @@ -506,3 +514,22 @@ func parseDirectiveContextMapIntoLists(directiveContextMap map[sfDirectiveContex return directiveContextList, countList } 
+ +func getNginxPodCount(g *graph.Graph) int64 { + var count int64 + for _, gateway := range g.Gateways { + replicas := int64(1) + + np := gateway.EffectiveNginxProxy + if np != nil && + np.Kubernetes != nil && + np.Kubernetes.Deployment != nil && + np.Kubernetes.Deployment.Replicas != nil { + replicas = int64(*np.Kubernetes.Deployment.Replicas) + } + + count += replicas + } + + return count +} diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index 845cb459fc..c8a17286dd 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -18,6 +18,8 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/framework/kubernetes/kubernetesfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" @@ -170,7 +172,7 @@ var _ = Describe("Collector", Ordered, func() { ClusterNodeCount: 1, }, NGFResourceCounts: telemetry.NGFResourceCounts{}, - NGFReplicaCount: 1, + ControlPlanePodCount: 1, ImageSource: "local", FlagNames: flags.Names, FlagValues: flags.Values, @@ -183,7 +185,7 @@ var _ = Describe("Collector", Ordered, func() { fakeConfigurationGetter = &telemetryfakes.FakeConfigurationGetter{} fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) dataCollector = telemetry.NewDataCollectorImpl(telemetry.DataCollectorConfig{ K8sClientReader: k8sClientReader, @@ -262,6 +264,24 @@ var _ = Describe("Collector", Ordered, func() { k8sClientReader.ListCalls(createListCallsFunc(nodes)) + 
k8sClientReader.GetCalls(mergeGetCallsWithBase(createGetCallsFunc( + &appsv1.ReplicaSet{ + Spec: appsv1.ReplicaSetSpec{ + Replicas: helpers.GetPointer(int32(2)), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "replica", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "Deployment1", + UID: "test-uid-replicaSet", + }, + }, + }, + }, + ))) + secret1 := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret1"}} secret2 := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret2"}} nilsecret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "nilsecret"}} @@ -270,17 +290,39 @@ var _ = Describe("Collector", Ordered, func() { svc2 := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc2"}} nilsvc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nilsvc"}} + gcNP := graph.NginxProxy{ + Source: nil, + ErrMsgs: nil, + Valid: false, + } + graph := &graph.Graph{ - GatewayClass: &graph.GatewayClass{}, - Gateway: &graph.Gateway{}, + GatewayClass: &graph.GatewayClass{NginxProxy: &gcNP}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Name: "gateway1"}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &v1alpha2.KubernetesSpec{ + Deployment: &v1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(1)), + }, + }, + }, + }, + {Name: "gateway2"}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &v1alpha2.KubernetesSpec{ + Deployment: &v1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + }, + }, + {Name: "gateway3"}: {}, + }, IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ {Name: "ignoredGC1"}: {}, {Name: "ignoredGC2"}: {}, }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Name: "ignoredGw1"}: {}, - {Name: "ignoredGw2"}: {}, - }, Routes: map[graph.RouteKey]*graph.L7Route{ {NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}}: {RouteType: graph.RouteTypeHTTP}, {NamespacedName: types.NamespacedName{Namespace: "test", Name: 
"hr-2"}}: {RouteType: graph.RouteTypeHTTP}, @@ -334,7 +376,11 @@ var _ = Describe("Collector", Ordered, func() { GVK: schema.GroupVersionKind{Kind: kinds.UpstreamSettingsPolicy}, }: {}, }, - NginxProxy: &graph.NginxProxy{}, + ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ + {Namespace: "test", Name: "NginxProxy-1"}: &gcNP, + {Namespace: "test", Name: "NginxProxy-2"}: {Valid: true}, + {Namespace: "test", Name: "NginxProxy-3"}: {Valid: true}, + }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: { Snippets: map[ngfAPI.NginxContext]string{ @@ -366,31 +412,47 @@ var _ = Describe("Collector", Ordered, func() { }, } - config := &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "upstream1", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, - }, { - Address: "endpoint2", - Port: 80, - }, { - Address: "endpoint3", - Port: 80, + configs := []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream1", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, { + Address: "endpoint2", + Port: 80, + }, { + Address: "endpoint3", + Port: 80, + }, + }, + }, + { + Name: "upstream2", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, }, }, }, - { - Name: "upstream2", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, + }, + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream3", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint4", + Port: 80, + }, }, }, }, @@ -398,7 +460,7 @@ var _ = Describe("Collector", Ordered, func() { } fakeGraphGetter.GetLatestGraphReturns(graph) - fakeConfigurationGetter.GetLatestConfigurationReturns(config) + fakeConfigurationGetter.GetLatestConfigurationReturns(configs) expData.ClusterNodeCount = 3 expData.NGFResourceCounts = 
telemetry.NGFResourceCounts{ @@ -408,15 +470,16 @@ var _ = Describe("Collector", Ordered, func() { TLSRouteCount: 3, SecretCount: 3, ServiceCount: 3, - EndpointCount: 4, + EndpointCount: 5, GRPCRouteCount: 2, BackendTLSPolicyCount: 3, GatewayAttachedClientSettingsPolicyCount: 1, RouteAttachedClientSettingsPolicyCount: 2, ObservabilityPolicyCount: 1, - NginxProxyCount: 1, + NginxProxyCount: 3, SnippetsFilterCount: 3, UpstreamSettingsPolicyCount: 1, + GatewayAttachedNpCount: 2, } expData.ClusterVersion = "1.29.2" expData.ClusterPlatform = "kind" @@ -444,6 +507,11 @@ var _ = Describe("Collector", Ordered, func() { 1, } + // one gateway with one replica + one gateway with three replicas + one gateway with replica field + // empty + expData.NginxPodCount = int64(5) + expData.ControlPlanePodCount = int64(2) + data, err := dataCollector.Collect(ctx) Expect(err).ToNot(HaveOccurred()) @@ -567,7 +635,7 @@ var _ = Describe("Collector", Ordered, func() { Describe("NGF resource count collector", func() { var ( graph1 *graph.Graph - config1, invalidUpstreamsConfig *dataplane.Configuration + config1, invalidUpstreamsConfig []*dataplane.Configuration ) BeforeAll(func() { @@ -575,8 +643,10 @@ var _ = Describe("Collector", Ordered, func() { svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1"}} graph1 = &graph.Graph{ - GatewayClass: &graph.GatewayClass{}, - Gateway: &graph.Gateway{}, + GatewayClass: &graph.GatewayClass{NginxProxy: &graph.NginxProxy{Valid: true}}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Name: "gateway1"}: {}, + }, Routes: map[graph.RouteKey]*graph.L7Route{ {NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}}: {RouteType: graph.RouteTypeHTTP}, }, @@ -613,49 +683,58 @@ var _ = Describe("Collector", Ordered, func() { GVK: schema.GroupVersionKind{Kind: kinds.UpstreamSettingsPolicy}, }: {}, }, - NginxProxy: &graph.NginxProxy{}, + ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ + {Namespace: "test", Name: 
"NginxProxy-1"}: {Valid: true}, + }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: {}, }, + BackendTLSPolicies: map[types.NamespacedName]*graph.BackendTLSPolicy{ + {Namespace: "test", Name: "BackendTLSPolicy-1"}: {}, + }, } - config1 = &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "upstream1", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, + config1 = []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream1", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, }, }, }, }, } - invalidUpstreamsConfig = &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "invalidUpstream", - ErrorMsg: "there is an error here", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, - }, { - Address: "endpoint2", - Port: 80, - }, { - Address: "endpoint3", - Port: 80, + invalidUpstreamsConfig = []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "invalidUpstream", + ErrorMsg: "there is an error here", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, { + Address: "endpoint2", + Port: 80, + }, { + Address: "endpoint3", + Port: 80, + }, }, }, - }, - { - Name: "emptyUpstream", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{}, + { + Name: "emptyUpstream", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{}, + }, }, }, } @@ -664,7 +743,7 @@ var _ = Describe("Collector", Ordered, func() { When("collecting NGF resource counts", func() { It("collects correct data for graph with no resources", func(ctx SpecContext) { fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) expData.NGFResourceCounts = telemetry.NGFResourceCounts{} @@ -692,7 +771,10 @@ var _ = 
Describe("Collector", Ordered, func() { NginxProxyCount: 1, SnippetsFilterCount: 1, UpstreamSettingsPolicyCount: 1, + GatewayAttachedNpCount: 1, + BackendTLSPolicyCount: 1, } + expData.NginxPodCount = 1 data, err := dataCollector.Collect(ctx) @@ -714,7 +796,7 @@ var _ = Describe("Collector", Ordered, func() { When("it encounters an error while collecting data", func() { BeforeEach(func() { fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) }) It("should error on nil latest graph", func(ctx SpecContext) { expectedError := errors.New("failed to collect telemetry data: latest graph cannot be nil") @@ -723,14 +805,6 @@ var _ = Describe("Collector", Ordered, func() { _, err := dataCollector.Collect(ctx) Expect(err).To(MatchError(expectedError)) }) - - It("should error on nil latest configuration", func(ctx SpecContext) { - expectedError := errors.New("latest configuration cannot be nil") - fakeConfigurationGetter.GetLatestConfigurationReturns(nil) - - _, err := dataCollector.Collect(ctx) - Expect(err).To(MatchError(expectedError)) - }) }) }) }) diff --git a/internal/mode/static/telemetry/data.avdl b/internal/mode/static/telemetry/data.avdl index 6909878866..95d99f316b 100644 --- a/internal/mode/static/telemetry/data.avdl +++ b/internal/mode/static/telemetry/data.avdl @@ -102,8 +102,14 @@ attached at the Gateway level. */ /** UpstreamSettingsPolicyCount is the number of UpstreamSettingsPolicies. */ long? UpstreamSettingsPolicyCount = null; - /** NGFReplicaCount is the number of replicas of the NGF Pod. */ - long? NGFReplicaCount = null; + /** GatewayAttachedNpCount is the total number of NginxProxy resources that are attached to a Gateway. */ + long? GatewayAttachedNpCount = null; + + /** NginxPodCount is the total number of Nginx data plane Pods. */ + long? 
NginxPodCount = null; + + /** ControlPlanePodCount is the total number of NGF control plane Pods. */ + long? ControlPlanePodCount = null; } } diff --git a/internal/mode/static/telemetry/data_attributes_generated.go b/internal/mode/static/telemetry/data_attributes_generated.go index 553925b0fd..afbd8dfb1f 100644 --- a/internal/mode/static/telemetry/data_attributes_generated.go +++ b/internal/mode/static/telemetry/data_attributes_generated.go @@ -20,7 +20,8 @@ func (d *Data) Attributes() []attribute.KeyValue { attrs = append(attrs, attribute.StringSlice("SnippetsFiltersDirectives", d.SnippetsFiltersDirectives)) attrs = append(attrs, attribute.Int64Slice("SnippetsFiltersDirectivesCount", d.SnippetsFiltersDirectivesCount)) attrs = append(attrs, d.NGFResourceCounts.Attributes()...) - attrs = append(attrs, attribute.Int64("NGFReplicaCount", d.NGFReplicaCount)) + attrs = append(attrs, attribute.Int64("NginxPodCount", d.NginxPodCount)) + attrs = append(attrs, attribute.Int64("ControlPlanePodCount", d.ControlPlanePodCount)) return attrs } diff --git a/internal/mode/static/telemetry/data_test.go b/internal/mode/static/telemetry/data_test.go index d2dfe9516b..867424e145 100644 --- a/internal/mode/static/telemetry/data_test.go +++ b/internal/mode/static/telemetry/data_test.go @@ -40,10 +40,12 @@ func TestDataAttributes(t *testing.T) { NginxProxyCount: 12, SnippetsFilterCount: 13, UpstreamSettingsPolicyCount: 14, + GatewayAttachedNpCount: 15, }, - NGFReplicaCount: 3, SnippetsFiltersDirectives: []string{"main-three-count", "http-two-count", "server-one-count"}, SnippetsFiltersDirectivesCount: []int64{3, 2, 1}, + NginxPodCount: 3, + ControlPlanePodCount: 3, } expected := []attribute.KeyValue{ @@ -79,7 +81,9 @@ func TestDataAttributes(t *testing.T) { attribute.Int64("NginxProxyCount", 12), attribute.Int64("SnippetsFilterCount", 13), attribute.Int64("UpstreamSettingsPolicyCount", 14), - attribute.Int64("NGFReplicaCount", 3), + attribute.Int64("GatewayAttachedNpCount", 15), + 
attribute.Int64("NginxPodCount", 3), + attribute.Int64("ControlPlanePodCount", 3), } result := data.Attributes() @@ -122,7 +126,9 @@ func TestDataAttributesWithEmptyData(t *testing.T) { attribute.Int64("NginxProxyCount", 0), attribute.Int64("SnippetsFilterCount", 0), attribute.Int64("UpstreamSettingsPolicyCount", 0), - attribute.Int64("NGFReplicaCount", 0), + attribute.Int64("GatewayAttachedNpCount", 0), + attribute.Int64("NginxPodCount", 0), + attribute.Int64("ControlPlanePodCount", 0), } result := data.Attributes() diff --git a/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go b/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go index baddcd174d..3073f15eb4 100644 --- a/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go +++ b/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go @@ -27,6 +27,7 @@ func (d *NGFResourceCounts) Attributes() []attribute.KeyValue { attrs = append(attrs, attribute.Int64("NginxProxyCount", d.NginxProxyCount)) attrs = append(attrs, attribute.Int64("SnippetsFilterCount", d.SnippetsFilterCount)) attrs = append(attrs, attribute.Int64("UpstreamSettingsPolicyCount", d.UpstreamSettingsPolicyCount)) + attrs = append(attrs, attribute.Int64("GatewayAttachedNpCount", d.GatewayAttachedNpCount)) return attrs } diff --git a/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go b/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go index a56fce8f7b..8650078dc7 100644 --- a/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go +++ b/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go @@ -9,21 +9,21 @@ import ( ) type FakeConfigurationGetter struct { - GetLatestConfigurationStub func() *dataplane.Configuration + GetLatestConfigurationStub func() []*dataplane.Configuration getLatestConfigurationMutex sync.RWMutex getLatestConfigurationArgsForCall []struct { } 
getLatestConfigurationReturns struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration } getLatestConfigurationReturnsOnCall map[int]struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeConfigurationGetter) GetLatestConfiguration() *dataplane.Configuration { +func (fake *FakeConfigurationGetter) GetLatestConfiguration() []*dataplane.Configuration { fake.getLatestConfigurationMutex.Lock() ret, specificReturn := fake.getLatestConfigurationReturnsOnCall[len(fake.getLatestConfigurationArgsForCall)] fake.getLatestConfigurationArgsForCall = append(fake.getLatestConfigurationArgsForCall, struct { @@ -47,32 +47,32 @@ func (fake *FakeConfigurationGetter) GetLatestConfigurationCallCount() int { return len(fake.getLatestConfigurationArgsForCall) } -func (fake *FakeConfigurationGetter) GetLatestConfigurationCalls(stub func() *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationCalls(stub func() []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = stub } -func (fake *FakeConfigurationGetter) GetLatestConfigurationReturns(result1 *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationReturns(result1 []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = nil fake.getLatestConfigurationReturns = struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }{result1} } -func (fake *FakeConfigurationGetter) GetLatestConfigurationReturnsOnCall(i int, result1 *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationReturnsOnCall(i int, result1 []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer 
fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = nil if fake.getLatestConfigurationReturnsOnCall == nil { fake.getLatestConfigurationReturnsOnCall = make(map[int]struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }) } fake.getLatestConfigurationReturnsOnCall[i] = struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }{result1} } diff --git a/scripts/generate-manifests.sh b/scripts/generate-manifests.sh index 731b359272..ca3e9c2041 100755 --- a/scripts/generate-manifests.sh +++ b/scripts/generate-manifests.sh @@ -30,10 +30,3 @@ done # For OpenShift, we don't need a Helm example so we generate the manifests from the default values.yaml generate_manifests openshift - -# FIXME(lucacome): Implement a better way to generate the static deployment file -# https://github.com/nginx/nginx-gateway-fabric/issues/2326 -helm template nginx-gateway charts/nginx-gateway-fabric --set nameOverride=nginx-gateway --set metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml >config/tests/static-deployment.yaml -sed -i.bak '/app.kubernetes.io\/managed-by: Helm/d' config/tests/static-deployment.yaml -sed -i.bak '/helm.sh/d' config/tests/static-deployment.yaml -rm -f config/tests/static-deployment.yaml.bak diff --git a/tests/Makefile b/tests/Makefile index c95f337d19..eec81e0ad7 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -13,8 +13,7 @@ GW_SVC_GKE_INTERNAL = false NGF_VERSION ?= edge## NGF version to be tested PULL_POLICY = Never## Pull policy for the images NGINX_CONF_DIR = internal/mode/static/nginx/conf -PROVISIONER_MANIFEST = conformance/provisioner/provisioner.yaml -SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation 
+SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation,GatewayInfrastructurePropagation STANDARD_CONFORMANCE_PROFILES = GATEWAY-HTTP,GATEWAY-GRPC EXPERIMENTAL_CONFORMANCE_PROFILES = GATEWAY-TLS CONFORMANCE_PROFILES = $(STANDARD_CONFORMANCE_PROFILES) # by default we use the standard conformance profiles. If experimental is enabled we override this and add the experimental profiles. @@ -45,7 +44,7 @@ build-test-runner-image: ## Build conformance test runner image .PHONY: build-crossplane-image build-crossplane-image: ## Build the crossplane image - docker build --build-arg NGINX_CONF_DIR=$(NGINX_CONF_DIR) -t nginx-crossplane:latest -f framework/crossplane/Dockerfile .. + docker build --platform $(GOOS)/$(GOARCH) --build-arg NGINX_CONF_DIR=$(NGINX_CONF_DIR) -t nginx-crossplane:latest -f framework/crossplane/Dockerfile .. 
.PHONY: run-conformance-tests run-conformance-tests: ## Run conformance tests @@ -74,9 +73,6 @@ cleanup-conformance-tests: ## Clean up conformance tests fixtures kubectl delete pod conformance kubectl delete -f conformance/conformance-rbac.yaml -.PHONY: build -build: generate-static-deployment - .PHONY: reset-go-modules reset-go-modules: ## Reset the go modules changes git checkout -- ../go.mod ../go.sum @@ -109,7 +105,9 @@ sync-files-to-vm: ## Syncs your local NGF files with the NGF repo on the VM ./scripts/sync-files-to-vm.sh .PHONY: nfr-test -nfr-test: check-for-plus-usage-endpoint ## Run the NFR tests on a GCP VM +nfr-test: GOARCH=amd64 +nfr-test: check-for-plus-usage-endpoint build-crossplane-image ## Run the NFR tests on a GCP VM + ./scripts/push-crossplane-image.sh CI=$(CI) ./scripts/run-tests-gcp-vm.sh .PHONY: start-longevity-test @@ -169,16 +167,7 @@ delete-gke-cluster: ## Delete the GKE cluster add-local-ip-to-cluster: ## Add local IP to the GKE cluster master-authorized-networks ./scripts/add-local-ip-auth-networks.sh -HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginxGateway.kind=skip --set service.create=false --skip-schema-validation - -.PHONY: deploy-updated-provisioner -deploy-updated-provisioner: ## Update provisioner manifest and deploy to the configured kind cluster - yq '(select(di != 3))' $(PROVISIONER_MANIFEST) | kubectl apply -f - - yq '(select(.spec.template.spec.containers[].image) | .spec.template.spec.containers[].image="$(PREFIX):$(TAG)" | .spec.template.spec.containers[].imagePullPolicy = "Never")' $(PROVISIONER_MANIFEST) | kubectl apply -f - - -.PHONY: generate-static-deployment -generate-static-deployment: - helm template nginx-gateway $(CHART_DIR) --set nameOverride=nginx-gateway --set metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginxGateway.image.pullPolicy=Never 
--set nginx.image.repository=$(NGINX_PREFIX) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) --set nginx.plus=$(PLUS_ENABLED) --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) > $(SELF_DIR)config/tests/static-deployment.yaml +HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginx.service.type=ClusterIP --skip-schema-validation # this target is used to install the gateway-api CRDs from the main branch (only used in the nightly CI job) # it overrides the target in the main Makefile when the GW_API_VERSION is set to main @@ -187,27 +176,15 @@ install-gateway-crds: kubectl kustomize "https://github.com/kubernetes-sigs/gateway-api/config/crd/$(if $(filter true,$(ENABLE_EXPERIMENTAL)),experimental,)?timeout=120&ref=main" | kubectl apply -f - endif -.PHONY: install-ngf-local-build -install-ngf-local-build: deploy-updated-provisioner - .PHONY: install-ngf-local-no-build -install-ngf-local-no-build: load-images helm-install-local deploy-updated-provisioner ## Install NGF from local build with provisioner on configured kind cluster but do not build the NGF image - -.PHONY: install-ngf-local-build-with-plus -install-ngf-local-build-with-plus: deploy-updated-provisioner +install-ngf-local-no-build: load-images helm-install-local ## Install NGF from local build on configured kind cluster but do not build the NGF image .PHONY: install-ngf-local-no-build-with-plus -install-ngf-local-no-build-with-plus: load-images-with-plus helm-install-local-with-plus deploy-updated-provisioner ## Install NGF with Plus from local build with provisioner on configured kind cluster but do not build the NGF image - -.PHONY: install-ngf-edge -install-ngf-edge: load-images helm-install-local ## Install NGF with provisioner from edge on configured kind cluster - kubectl apply -f $(PROVISIONER_MANIFEST) +install-ngf-local-no-build-with-plus: load-images-with-plus helm-install-local-with-plus ## Install 
NGF with Plus from local build on configured kind cluster but do not build the NGF image .PHONY: uninstall-ngf -uninstall-ngf: ## Uninstall NGF on configured kind cluster and undo manifest changes +uninstall-ngf: ## Uninstall NGF on configured kind cluster -helm uninstall nginx-gateway -n nginx-gateway -make uninstall-gateway-crds - -kubectl delete clusterrole nginx-gateway-provisioner - -kubectl delete clusterrolebinding nginx-gateway-provisioner -kubectl delete namespace nginx-gateway -kubectl kustomize ../config/crd | kubectl delete -f - diff --git a/tests/README.md b/tests/README.md index 1db86ebc3b..16df0bc36a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -19,7 +19,6 @@ This directory contains the tests for NGINX Gateway Fabric. The tests are divide - [Step 1 - Install NGINX Gateway Fabric to configured kind cluster](#step-1---install-nginx-gateway-fabric-to-configured-kind-cluster) - [Option 1 - Build and install NGINX Gateway Fabric from local to configured kind cluster](#option-1---build-and-install-nginx-gateway-fabric-from-local-to-configured-kind-cluster) - [Option 2 - Install NGINX Gateway Fabric from local already built image to configured kind cluster](#option-2---install-nginx-gateway-fabric-from-local-already-built-image-to-configured-kind-cluster) - - [Option 3 - Install NGINX Gateway Fabric from edge to configured kind cluster](#option-3---install-nginx-gateway-fabric-from-edge-to-configured-kind-cluster) - [Step 2 - Build conformance test runner image](#step-2---build-conformance-test-runner-image) - [Step 3 - Run Gateway conformance tests](#step-3---run-gateway-conformance-tests) - [Step 4 - Cleanup the conformance test fixtures and uninstall NGINX Gateway Fabric](#step-4---cleanup-the-conformance-test-fixtures-and-uninstall-nginx-gateway-fabric) @@ -158,15 +157,6 @@ Or, to install NGF with NGINX Plus enabled: make install-ngf-local-no-build-with-plus ``` -#### Option 3 - Install NGINX Gateway Fabric from edge to configured kind cluster 
- -You can also skip the build NGF image step and prepare the environment to instead use the `edge` image. Note that this -option does not currently support installing with NGINX Plus enabled. - -```makefile -make install-ngf-edge -``` - ### Step 2 - Build conformance test runner image > Note: If you want to run the latest conformance tests from the Gateway API `main` branch, run the following diff --git a/tests/conformance/conformance-rbac.yaml b/tests/conformance/conformance-rbac.yaml index c1c0d54185..26572dbe94 100644 --- a/tests/conformance/conformance-rbac.yaml +++ b/tests/conformance/conformance-rbac.yaml @@ -16,6 +16,7 @@ rules: - pods - secrets - services + - serviceaccounts verbs: - create - delete diff --git a/tests/conformance/provisioner/README.md b/tests/conformance/provisioner/README.md deleted file mode 100644 index c9f5bc36ae..0000000000 --- a/tests/conformance/provisioner/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Provisioner - -Provisioner implements data plane provisioning for NGINX Gateway Fabric (NGF): it creates an NGF static mode -Deployment for each Gateway that belongs to the provisioner GatewayClass. - -```text -Usage: - gateway provisioner-mode [flags] - -Flags: - -h, --help help for provisioner-mode - -Global Flags: - --gateway-ctlr-name string The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain is 'gateway.nginx.org' (default "") - --gatewayclass string The name of the GatewayClass resource. Every NGINX Gateway Fabric must have a unique corresponding GatewayClass resource. (default "") -``` - -> Note: Provisioner is not ready for production yet (see this issue for more details -https://github.com/nginx/nginx-gateway-fabric/issues/634). However, it can be used in the Gateway API conformance -tests, which expect a Gateway API implementation to provision an independent data plane per Gateway. 
-> -> Note: Provisioner uses [this manifest](https://github.com/nginx/nginx-gateway-fabric/blob/main/config/tests/static-deployment.yaml) -to create an NGF static mode Deployment. -> This manifest gets included into the NGF binary during the NGF build. To customize the Deployment, modify the -manifest and **re-build** NGF. - -How to deploy: - -1. Follow the [installation](https://docs.nginx.com/nginx-gateway-fabric/installation/) instructions up until the Deploy the NGINX Gateway Fabric step - to deploy prerequisites for both the static mode Deployments and the provisioner. -1. Deploy provisioner: - - ```shell - kubectl apply -f provisioner.yaml - ``` - -1. Confirm the provisioner is running in nginx-gateway namespace: - - ```shell - kubectl get pods -n nginx-gateway - ``` - - ```text - - NAME READY STATUS RESTARTS AGE - nginx-gateway-provisioner-6c9d9fdcb8-b2pf8 1/1 Running 0 11m - ``` diff --git a/tests/conformance/provisioner/provisioner.yaml b/tests/conformance/provisioner/provisioner.yaml deleted file mode 100644 index 07ebae4a45..0000000000 --- a/tests/conformance/provisioner/provisioner.yaml +++ /dev/null @@ -1,79 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx-gateway-provisioner - namespace: nginx-gateway ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nginx-gateway-provisioner -rules: -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses - - gateways - verbs: - - list - - watch -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses/status - verbs: - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nginx-gateway-provisioner -subjects: -- kind: ServiceAccount - name: nginx-gateway-provisioner - namespace: 
nginx-gateway -roleRef: - kind: ClusterRole - name: nginx-gateway-provisioner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-gateway-provisioner - namespace: nginx-gateway -spec: - replicas: 1 - selector: - matchLabels: - app: nginx-gateway-provisioner - template: - metadata: - labels: - app: nginx-gateway-provisioner - spec: - serviceAccountName: nginx-gateway-provisioner - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway-provisioner - securityContext: - runAsUser: 1001 - args: - - provisioner-mode - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx diff --git a/tests/framework/collector.go b/tests/framework/collector.go index e5e4d3f377..61e24ba770 100644 --- a/tests/framework/collector.go +++ b/tests/framework/collector.go @@ -28,6 +28,14 @@ func InstallCollector() ([]byte, error) { return output, err } + if output, err := exec.Command( + "helm", + "repo", + "update", + ).CombinedOutput(); err != nil { + return output, fmt.Errorf("failed to update helm repos: %w; output: %s", err, string(output)) + } + args := []string{ "install", collectorChartReleaseName, diff --git a/tests/framework/crossplane.go b/tests/framework/crossplane.go index 81f47e3567..02a16b6cb5 100644 --- a/tests/framework/crossplane.go +++ b/tests/framework/crossplane.go @@ -38,6 +38,8 @@ type ExpectedNginxField struct { ValueSubstringAllowed bool } +const crossplaneImageName = "nginx-crossplane:latest" + // ValidateNginxFieldExists accepts the nginx config and the configuration for the expected field, // and returns whether or not that field exists where it should. 
func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) error { @@ -144,11 +146,17 @@ func injectCrossplaneContainer( k8sClient kubernetes.Interface, timeout time.Duration, ngfPodName, - namespace string, + namespace, + crossplaneImageRepo string, ) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() + image := crossplaneImageName + if crossplaneImageRepo != "" { + image = crossplaneImageRepo + "/" + image + } + pod := &core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: ngfPodName, @@ -160,8 +168,8 @@ func injectCrossplaneContainer( TargetContainerName: "nginx", EphemeralContainerCommon: core.EphemeralContainerCommon{ Name: "crossplane", - Image: "nginx-crossplane:latest", - ImagePullPolicy: "Never", + Image: image, + ImagePullPolicy: "IfNotPresent", Stdin: true, VolumeMounts: []core.VolumeMount{ { @@ -203,7 +211,7 @@ func injectCrossplaneContainer( func createCrossplaneExecutor( k8sClient kubernetes.Interface, k8sConfig *rest.Config, - ngfPodName, + nginxPodName, namespace string, ) (remotecommand.Executor, error) { cmd := []string{"./crossplane", "/etc/nginx/nginx.conf"} @@ -217,7 +225,7 @@ func createCrossplaneExecutor( req := k8sClient.CoreV1().RESTClient().Post(). Resource("pods"). SubResource("exec"). - Name(ngfPodName). + Name(nginxPodName). Namespace(namespace). VersionedParams(opts, scheme.ParameterCodec) diff --git a/tests/framework/info.go b/tests/framework/info.go index 588b728631..c485edc9aa 100644 --- a/tests/framework/info.go +++ b/tests/framework/info.go @@ -4,6 +4,7 @@ import ( "fmt" "runtime/debug" + . "github.com/onsi/ginkgo/v2" core "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -81,3 +82,27 @@ func GetBuildInfo() (commitHash string, commitTime string, dirtyBuild string) { return } + +// AddNginxLogsAndEventsToReport adds nginx logs and events from the namespace to the report if the spec failed. 
+func AddNginxLogsAndEventsToReport(rm ResourceManager, namespace string) { + if CurrentSpecReport().Failed() { + var returnLogs string + + nginxPodNames, _ := GetReadyNginxPodNames(rm.K8sClient, namespace, rm.TimeoutConfig.GetStatusTimeout) + + for _, nginxPodName := range nginxPodNames { + returnLogs += fmt.Sprintf("Logs for Nginx Pod %s:\n", nginxPodName) + nginxLogs, _ := rm.GetPodLogs( + namespace, + nginxPodName, + &core.PodLogOptions{Container: "nginx"}, + ) + + returnLogs += fmt.Sprintf(" %s\n", nginxLogs) + } + AddReportEntry("Nginx Logs", returnLogs, ReportEntryVisibilityNever) + + events := GetEvents(rm, namespace) + AddReportEntry("Test Events", events, ReportEntryVisibilityNever) + } +} diff --git a/tests/framework/ngf.go b/tests/framework/ngf.go index 45a1eeecc9..48cd1b9ed7 100644 --- a/tests/framework/ngf.go +++ b/tests/framework/ngf.go @@ -230,7 +230,7 @@ func setImageArgs(cfg InstallationConfig) []string { } if cfg.ServiceType != "" { - args = append(args, formatValueSet("service.type", cfg.ServiceType)...) + args = append(args, formatValueSet("nginx.service.type", cfg.ServiceType)...) 
if cfg.ServiceType == "LoadBalancer" && cfg.IsGKEInternalLB { args = append( args, diff --git a/tests/framework/portforward.go b/tests/framework/portforward.go index 26cd4b3cfb..500dc354aa 100644 --- a/tests/framework/portforward.go +++ b/tests/framework/portforward.go @@ -52,13 +52,13 @@ func PortForward(config *rest.Config, namespace, podName string, ports []string, for { if err := forward(); err != nil { slog.Error("error forwarding ports", "error", err) - slog.Info("retrying port forward in 100ms...") + slog.Info("retrying port forward in 1s...") } select { case <-stopCh: return - case <-time.After(100 * time.Millisecond): + case <-time.After(1 * time.Second): // retrying } } diff --git a/tests/framework/prometheus.go b/tests/framework/prometheus.go index fd7bf44624..3c3094712b 100644 --- a/tests/framework/prometheus.go +++ b/tests/framework/prometheus.go @@ -302,119 +302,6 @@ type Bucket struct { Val int } -// GetReloadCount gets the total number of nginx reloads. -func GetReloadCount(promInstance PrometheusInstance, ngfPodName string) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadCountWithStartTime gets the total number of nginx reloads from a start time to the current time. -func GetReloadCountWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadErrsCountWithStartTime gets the total number of nginx reload errors from a start time to the current time. 
-func GetReloadErrsCountWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reload_errors_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reload_errors_total{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadAvgTime gets the average time in milliseconds for nginx to reload. -func GetReloadAvgTime(promInstance PrometheusInstance, ngfPodName string) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"}`+ - ` / `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadAvgTimeWithStartTime gets the average time in milliseconds for nginx to reload using a start time -// to the current time to calculate. -func GetReloadAvgTimeWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `(nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"} @ %[2]d)`+ - ` / `+ - `(nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"} @ %[2]d)`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadBuckets gets the Buckets in millisecond intervals for nginx reloads. -func GetReloadBuckets(promInstance PrometheusInstance, ngfPodName string) ([]Bucket, error) { - return getBuckets( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadBucketsWithStartTime gets the Buckets in millisecond intervals for nginx reloads from a start time -// to the current time. 
-func GetReloadBucketsWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) ([]Bucket, error) { - return getBuckets( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - // GetEventsCount gets the NGF event batch processing count. func GetEventsCount(promInstance PrometheusInstance, ngfPodName string) (float64, error) { return getFirstValueOfVector( diff --git a/tests/framework/resourcemanager.go b/tests/framework/resourcemanager.go index 434a5ecaed..f398e97375 100644 --- a/tests/framework/resourcemanager.go +++ b/tests/framework/resourcemanager.go @@ -46,6 +46,8 @@ import ( "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" ) // ResourceManager handles creating/updating/deleting Kubernetes resources. @@ -647,6 +649,44 @@ func (rm *ResourceManager) GetNGFDeployment(namespace, releaseName string) (*app return &deployment, nil } +func (rm *ResourceManager) getGatewayClassNginxProxy( + namespace, + releaseName string, +) (*ngfAPIv1alpha2.NginxProxy, error) { + ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.GetTimeout) + defer cancel() + + var proxy ngfAPIv1alpha2.NginxProxy + proxyName := releaseName + "-proxy-config" + + if err := rm.K8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: proxyName}, &proxy); err != nil { + return nil, err + } + + return &proxy, nil +} + +// ScaleNginxDeployment scales the Nginx Deployment to the specified number of replicas. 
+func (rm *ResourceManager) ScaleNginxDeployment(namespace, releaseName string, replicas int32) error { + ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.UpdateTimeout) + defer cancel() + + // If there is another NginxProxy which "overrides" the gateway class one, then this won't work and + // may need refactoring. + proxy, err := rm.getGatewayClassNginxProxy(namespace, releaseName) + if err != nil { + return fmt.Errorf("error getting NginxProxy: %w", err) + } + + proxy.Spec.Kubernetes.Deployment.Replicas = &replicas + + if err = rm.K8sClient.Update(ctx, proxy); err != nil { + return fmt.Errorf("error updating NginxProxy: %w", err) + } + + return nil +} + // GetEvents returns all Events in the specified namespace. func (rm *ResourceManager) GetEvents(namespace string) (*core.EventList, error) { ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.GetTimeout) @@ -692,31 +732,84 @@ func GetReadyNGFPodNames( ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - var podList core.PodList - if err := k8sClient.List( + var ngfPodNames []string + + err := wait.PollUntilContextCancel( ctx, - &podList, - client.InNamespace(namespace), - client.MatchingLabels{ - "app.kubernetes.io/instance": releaseName, + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + var podList core.PodList + if err := k8sClient.List( + ctx, + &podList, + client.InNamespace(namespace), + client.MatchingLabels{ + "app.kubernetes.io/instance": releaseName, + }, + ); err != nil { + return false, fmt.Errorf("error getting list of NGF Pods: %w", err) + } + + ngfPodNames = getReadyPodNames(podList) + return len(ngfPodNames) > 0, nil }, - ); err != nil { - return nil, fmt.Errorf("error getting list of Pods: %w", err) + ) + if err != nil { + return nil, fmt.Errorf("timed out waiting for NGF Pods to be ready: %w", err) } - if len(podList.Items) > 0 { - var names []string - for _, pod := range 
podList.Items { - for _, cond := range pod.Status.Conditions { - if cond.Type == core.PodReady && cond.Status == core.ConditionTrue { - names = append(names, pod.Name) - } + return ngfPodNames, nil +} + +// GetReadyNginxPodNames returns the name(s) of the NGINX Pod(s). +func GetReadyNginxPodNames( + k8sClient client.Client, + namespace string, + timeout time.Duration, +) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var nginxPodNames []string + + err := wait.PollUntilContextCancel( + ctx, + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + var podList core.PodList + if err := k8sClient.List( + ctx, + &podList, + client.InNamespace(namespace), + client.HasLabels{"gateway.networking.k8s.io/gateway-name"}, + ); err != nil { + return false, fmt.Errorf("error getting list of NGINX Pods: %w", err) + } + + nginxPodNames = getReadyPodNames(podList) + return len(nginxPodNames) > 0, nil + }, + ) + if err != nil { + return nil, fmt.Errorf("timed out waiting for NGINX Pods to be ready: %w", err) + } + + return nginxPodNames, nil +} + +func getReadyPodNames(podList core.PodList) []string { + var names []string + for _, pod := range podList.Items { + for _, cond := range pod.Status.Conditions { + if cond.Type == core.PodReady && cond.Status == core.ConditionTrue { + names = append(names, pod.Name) } } - return names, nil } - return nil, errors.New("unable to find NGF Pod(s)") + return names } func countNumberOfReadyParents(parents []v1.RouteParentStatus) int { @@ -733,34 +826,7 @@ func countNumberOfReadyParents(parents []v1.RouteParentStatus) int { return readyCount } -func (rm *ResourceManager) WaitForAppsToBeReadyWithPodCount(namespace string, podCount int) error { - ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.CreateTimeout) - defer cancel() - - return rm.WaitForAppsToBeReadyWithCtxWithPodCount(ctx, namespace, podCount) -} - -func (rm 
*ResourceManager) WaitForAppsToBeReadyWithCtxWithPodCount( - ctx context.Context, - namespace string, - podCount int, -) error { - if err := rm.WaitForPodsToBeReadyWithCount(ctx, namespace, podCount); err != nil { - return err - } - - if err := rm.waitForHTTPRoutesToBeReady(ctx, namespace); err != nil { - return err - } - - if err := rm.waitForGRPCRoutesToBeReady(ctx, namespace); err != nil { - return err - } - - return rm.waitForGatewaysToBeReady(ctx, namespace) -} - -// WaitForPodsToBeReady waits for all Pods in the specified namespace to be ready or +// WaitForPodsToBeReadyWithCount waits for all Pods in the specified namespace to be ready or // until the provided context is canceled. func (rm *ResourceManager) WaitForPodsToBeReadyWithCount(ctx context.Context, namespace string, count int) error { return wait.PollUntilContextCancel( @@ -817,17 +883,19 @@ func (rm *ResourceManager) WaitForGatewayObservedGeneration( } // GetNginxConfig uses crossplane to get the nginx configuration and convert it to JSON. -func (rm *ResourceManager) GetNginxConfig(ngfPodName, namespace string) (*Payload, error) { +// If the crossplane image is loaded locally on the node, crossplaneImageRepo can be empty. 
+func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace, crossplaneImageRepo string) (*Payload, error) { if err := injectCrossplaneContainer( rm.ClientGoClient, rm.TimeoutConfig.UpdateTimeout, - ngfPodName, + nginxPodName, namespace, + crossplaneImageRepo, ); err != nil { return nil, err } - exec, err := createCrossplaneExecutor(rm.ClientGoClient, rm.K8sConfig, ngfPodName, namespace) + exec, err := createCrossplaneExecutor(rm.ClientGoClient, rm.K8sConfig, nginxPodName, namespace) if err != nil { return nil, err } diff --git a/tests/framework/timeout.go b/tests/framework/timeout.go index 956b1699f3..8d8557622f 100644 --- a/tests/framework/timeout.go +++ b/tests/framework/timeout.go @@ -43,7 +43,7 @@ func DefaultTimeoutConfig() TimeoutConfig { CreateTimeout: 60 * time.Second, UpdateTimeout: 60 * time.Second, DeleteTimeout: 10 * time.Second, - DeleteNamespaceTimeout: 60 * time.Second, + DeleteNamespaceTimeout: 90 * time.Second, GetTimeout: 10 * time.Second, ManifestFetchTimeout: 10 * time.Second, RequestTimeout: 10 * time.Second, diff --git a/tests/go.mod b/tests/go.mod index 3ce2b439a9..fd2c7a72ea 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -8,8 +8,8 @@ require ( github.com/nginx/nginx-gateway-fabric v0.0.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.1 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/common v0.62.0 github.com/tsenart/vegeta/v12 v12.12.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -26,7 +26,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect 
github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -45,7 +45,6 @@ require ( github.com/influxdata/tdigest v0.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/miekg/dns v1.1.65 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -56,7 +55,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/testify v1.10.0 // indirect diff --git a/tests/go.sum b/tests/go.sum index c69d6c4a8d..82c4ed9185 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -18,8 +18,8 @@ github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -67,8 +67,8 @@ 
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -103,14 +103,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 
h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8= diff --git a/tests/scripts/push-crossplane-image.sh b/tests/scripts/push-crossplane-image.sh new file mode 100755 index 0000000000..31bd06d2f6 --- /dev/null +++ b/tests/scripts/push-crossplane-image.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -eo pipefail + +source scripts/vars.env + +docker tag nginx-crossplane:latest us-docker.pkg.dev/$GKE_PROJECT/nginx-gateway-fabric/nginx-crossplane:latest +docker push us-docker.pkg.dev/$GKE_PROJECT/nginx-gateway-fabric/nginx-crossplane:latest diff --git a/tests/suite/advanced_routing_test.go b/tests/suite/advanced_routing_test.go index a58c9a7f7e..cffc5bad5b 100644 --- a/tests/suite/advanced_routing_test.go +++ b/tests/suite/advanced_routing_test.go @@ -39,9 +39,18 @@ var _ = Describe("AdvancedRouting", Ordered, Label("functional", "routing"), fun Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := 
framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index f1f12304ee..1f6293e6b5 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -32,6 +32,8 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" } namespace = "clientsettings" + + nginxPodName string ) BeforeAll(func() { @@ -44,9 +46,20 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -91,18 +104,36 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" } }) + Context("verify working traffic", func() { + It("should return a 200 response for HTTPRoutes", func() { + baseCoffeeURL := baseURL + "/coffee" + baseTeaURL := baseURL + "/tea" + + Eventually( + func() error { + return expectRequestToSucceed(baseCoffeeURL, address, "URI: /coffee") + 
}). + WithTimeout(timeoutConfig.RequestTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + + Eventually( + func() error { + return expectRequestToSucceed(baseTeaURL, address, "URI: /tea") + }). + WithTimeout(timeoutConfig.RequestTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + }) + }) + Context("nginx config", func() { var conf *framework.Payload filePrefix := fmt.Sprintf("/etc/nginx/includes/ClientSettingsPolicy_%s", namespace) BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) @@ -248,13 +279,12 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" When("a ClientSettingsPolicy targets an invalid resources", func() { Specify("their accepted condition is set to TargetNotFound", func() { files := []string{ - "clientsettings/ignored-gateway.yaml", - "clientsettings/invalid-csp.yaml", + "clientsettings/invalid-route-csp.yaml", } Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) - nsname := types.NamespacedName{Name: "invalid-csp", Namespace: namespace} + nsname := types.NamespacedName{Name: "invalid-route-csp", Namespace: namespace} Expect(waitForCSPolicyToHaveTargetNotFoundAcceptedCond(nsname)).To(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) diff --git a/tests/suite/dataplane_perf_test.go b/tests/suite/dataplane_perf_test.go index 5604b0188f..adedebf05f 100644 --- a/tests/suite/dataplane_perf_test.go +++ b/tests/suite/dataplane_perf_test.go @@ -18,44 +18,47 @@ import ( ) var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), func() { - files 
:= []string{ - "dp-perf/coffee.yaml", - "dp-perf/gateway.yaml", - "dp-perf/cafe-routes.yaml", - } - - var ns core.Namespace - - var addr string - targetURL := "http://cafe.example.com" - var outFile *os.File - - t1 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/latte"), - } - t2 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/coffee"), - Header: http.Header{"version": []string{"v2"}}, - } - t3 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/coffee?TEST=v2"), - } - t4 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/tea"), - } - t5 := framework.Target{ - Method: "POST", - URL: fmt.Sprintf("%s%s", targetURL, "/tea"), - } + var ( + files = []string{ + "dp-perf/coffee.yaml", + "dp-perf/gateway.yaml", + "dp-perf/cafe-routes.yaml", + } + + namespace = "dp-perf" + + targetURL = "http://cafe.example.com" + + t1 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/latte"), + } + t2 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/coffee"), + Header: http.Header{"version": []string{"v2"}}, + } + t3 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/coffee?TEST=v2"), + } + t4 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/tea"), + } + t5 = framework.Target{ + Method: "POST", + URL: fmt.Sprintf("%s%s", targetURL, "/tea"), + } + + outFile *os.File + addr string + ) BeforeAll(func() { - ns = core.Namespace{ + ns := core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "dp-perf", + Name: namespace, }, } @@ -63,6 +66,12 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + 
Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) + port := ":80" if portFwdPort != 0 { port = fmt.Sprintf(":%s", strconv.Itoa(portFwdPort)) @@ -79,8 +88,11 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), }) AfterAll(func() { - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() + + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) outFile.Close() }) diff --git a/tests/suite/graceful_recovery_test.go b/tests/suite/graceful_recovery_test.go index 33c3c447d0..c2987610f7 100644 --- a/tests/suite/graceful_recovery_test.go +++ b/tests/suite/graceful_recovery_test.go @@ -17,7 +17,6 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - ctlr "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -29,56 +28,162 @@ const ( ngfContainerName = "nginx-gateway" ) -// Since checkNGFContainerLogsForErrors may experience interference from previous tests (as explained in the function -// documentation), this test is recommended to be run separate from other tests. +// Since this test involves restarting of the test node, it is recommended to be run separate from other tests +// such that any issues in this test do not interfere with other tests. 
var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), func() { - files := []string{ - "graceful-recovery/cafe.yaml", - "graceful-recovery/cafe-secret.yaml", - "graceful-recovery/gateway.yaml", - "graceful-recovery/cafe-routes.yaml", + var ( + files = []string{ + "graceful-recovery/cafe.yaml", + "graceful-recovery/cafe-secret.yaml", + "graceful-recovery/gateway.yaml", + "graceful-recovery/cafe-routes.yaml", + } + + ns core.Namespace + + baseHTTPURL = "http://cafe.example.com" + baseHTTPSURL = "https://cafe.example.com" + teaURL = baseHTTPSURL + "/tea" + coffeeURL = baseHTTPURL + "/coffee" + + activeNGFPodName, activeNginxPodName string + ) + + checkForWorkingTraffic := func(teaURL, coffeeURL string) error { + if err := expectRequestToSucceed(teaURL, address, "URI: /tea"); err != nil { + return err + } + if err := expectRequestToSucceed(coffeeURL, address, "URI: /coffee"); err != nil { + return err + } + return nil } - var ns core.Namespace + checkForFailingTraffic := func(teaURL, coffeeURL string) error { + if err := expectRequestToFail(teaURL, address); err != nil { + return err + } + if err := expectRequestToFail(coffeeURL, address); err != nil { + return err + } + return nil + } - baseHTTPURL := "http://cafe.example.com" - baseHTTPSURL := "https://cafe.example.com" - teaURL := baseHTTPSURL + "/tea" - coffeeURL := baseHTTPURL + "/coffee" + getContainerRestartCount := func(podName, namespace, containerName string) (int, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() - var ngfPodName string + var pod core.Pod + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: podName}, &pod); err != nil { + return 0, fmt.Errorf("error retrieving Pod: %w", err) + } - BeforeEach(func() { - // this test is unique in that it will check the entire log of both ngf and nginx containers - // for any errors, so in order to avoid errors generated in previous tests we will 
uninstall - // NGF installed at the suite level, then re-deploy our own. We will also uninstall and re-install - // NGF between each graceful-recovery test for the same reason. - teardown(releaseName) + var restartCount int + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == containerName { + restartCount = int(containerStatus.RestartCount) + } + } - setup(getDefaultSetupCfg()) + return restartCount, nil + } - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) + checkContainerRestart := func(podName, containerName, namespace string, currentRestartCount int) error { + restartCount, err := getContainerRestartCount(podName, namespace, containerName) + if err != nil { + return err + } - ngfPodName = podNames[0] - if portFwdPort != 0 { - coffeeURL = fmt.Sprintf("%s:%d/coffee", baseHTTPURL, portFwdPort) + if restartCount != currentRestartCount+1 { + return fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", + restartCount, currentRestartCount+1) } - if portFwdHTTPSPort != 0 { - teaURL = fmt.Sprintf("%s:%d/tea", baseHTTPSURL, portFwdHTTPSPort) + + return nil + } + + getNodeNames := func() ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() + var nodes core.NodeList + + if err := k8sClient.List(ctx, &nodes); err != nil { + return nil, fmt.Errorf("error listing nodes: %w", err) } - ns = core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "graceful-recovery", - }, + names := make([]string, 0, len(nodes.Items)) + + for _, node := range nodes.Items { + names = append(names, node.Name) } - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReadyWithPodCount(ns.Name, 
2)).To(Succeed()) + return names, nil + } + runNodeDebuggerJob := func(nginxPodName, jobScript string) (*v1.Job, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() + + var nginxPod core.Pod + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: nginxPodName}, &nginxPod); err != nil { + return nil, fmt.Errorf("error retrieving NGF Pod: %w", err) + } + + b, err := resourceManager.GetFileContents("graceful-recovery/node-debugger-job.yaml") + if err != nil { + return nil, fmt.Errorf("error processing node debugger job file: %w", err) + } + + job := &v1.Job{} + if err = yaml.Unmarshal(b.Bytes(), job); err != nil { + return nil, fmt.Errorf("error with yaml unmarshal: %w", err) + } + + job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] = nginxPod.Spec.NodeName + if len(job.Spec.Template.Spec.Containers) != 1 { + return nil, fmt.Errorf( + "expected node debugger job to contain one container, actual number: %d", + len(job.Spec.Template.Spec.Containers), + ) + } + job.Spec.Template.Spec.Containers[0].Args = []string{jobScript} + job.Namespace = ns.Name + + if err = resourceManager.Apply([]client.Object{job}); err != nil { + return nil, fmt.Errorf("error in applying job: %w", err) + } + + return job, nil + } + + restartNginxContainer := func(nginxPodName, namespace, containerName string) { + jobScript := "PID=$(pgrep -f \"nginx-agent\") && kill -9 $PID" + + restartCount, err := getContainerRestartCount(nginxPodName, namespace, containerName) + Expect(err).ToNot(HaveOccurred()) + + cleanUpPortForward() + job, err := runNodeDebuggerJob(nginxPodName, jobScript) + Expect(err).ToNot(HaveOccurred()) + + Eventually( + func() error { + return checkContainerRestart(nginxPodName, containerName, namespace, restartCount) + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) + + // default propagation policy is metav1.DeletePropagationOrphan which does not delete the underlying + // pod created through the job after the job is deleted. Setting it to metav1.DeletePropagationBackground + // deletes the underlying pod after the job is deleted. + Expect(resourceManager.Delete( + []client.Object{job}, + client.PropagationPolicy(metav1.DeletePropagationBackground), + )).To(Succeed()) + } + + checkNGFFunctionality := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { Eventually( func() error { return checkForWorkingTraffic(teaURL, coffeeURL) @@ -86,212 +191,334 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), WithTimeout(timeoutConfig.TestForTrafficTimeout). WithPolling(500 * time.Millisecond). Should(Succeed()) - }) - AfterAll(func() { + cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) - }) - It("recovers when NGF container is restarted", func() { - runRecoveryTest(teaURL, coffeeURL, ngfPodName, ngfContainerName, files, &ns) - }) + Eventually( + func() error { + return checkForFailingTraffic(teaURL, coffeeURL) + }). + WithTimeout(timeoutConfig.TestForTrafficTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) - It("recovers when nginx container is restarted", func() { - runRecoveryTest(teaURL, coffeeURL, ngfPodName, nginxContainerName, files, &ns) - }) + Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - It("recovers when drained node is restarted", func() { - runRestartNodeWithDrainingTest(teaURL, coffeeURL, files, &ns) - }) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). 
+ WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(10). + Should(BeTrue()) - It("recovers when node is restarted abruptly", func() { - runRestartNodeAbruptlyTest(teaURL, coffeeURL, files, &ns) - }) -}) + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + activeNginxPodName = nginxPodName -func runRestartNodeWithDrainingTest(teaURL, coffeeURL string, files []string, ns *core.Namespace) { - runRestartNodeTest(teaURL, coffeeURL, files, ns, true) -} + setUpPortForward(activeNginxPodName, ns.Name) -func runRestartNodeAbruptlyTest(teaURL, coffeeURL string, files []string, ns *core.Namespace) { - runRestartNodeTest(teaURL, coffeeURL, files, ns, false) -} + Eventually( + func() error { + return checkForWorkingTraffic(teaURL, coffeeURL) + }). + WithTimeout(timeoutConfig.TestForTrafficTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + } -func runRestartNodeTest(teaURL, coffeeURL string, files []string, ns *core.Namespace, drain bool) { - nodeNames, err := getNodeNames() - Expect(err).ToNot(HaveOccurred()) - Expect(nodeNames).To(HaveLen(1)) + runRestartNodeTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace, drain bool) { + nodeNames, err := getNodeNames() + Expect(err).ToNot(HaveOccurred()) + Expect(nodeNames).To(HaveLen(1)) - kindNodeName := nodeNames[0] + kindNodeName := nodeNames[0] - Expect(clusterName).ToNot(BeNil(), "clusterName variable not set") - Expect(*clusterName).ToNot(BeEmpty()) - containerName := *clusterName + "-control-plane" + Expect(clusterName).ToNot(BeNil(), "clusterName variable not set") + Expect(*clusterName).ToNot(BeEmpty()) + containerName := *clusterName + "-control-plane" - if portFwdPort != 0 { - close(portForwardStopCh) - } + cleanUpPortForward() + + if drain { + output, err := exec.Command( + "kubectl", + "drain", + kindNodeName, + "--ignore-daemonsets", + "--delete-emptydir-data", + ).CombinedOutput() + + 
Expect(err).ToNot(HaveOccurred(), string(output)) + + output, err = exec.Command("kubectl", "delete", "node", kindNodeName).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(output)) + } + + _, err = exec.Command("docker", "restart", containerName).CombinedOutput() + Expect(err).ToNot(HaveOccurred()) + + // need to wait for docker container to restart and be running before polling for ready NGF Pods or else we will error + Eventually( + func() bool { + output, err := exec.Command( + "docker", + "inspect", + "-f", + "{{.State.Running}}", + containerName, + ).CombinedOutput() + return strings.TrimSpace(string(output)) == "true" && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). + Should(BeTrue()) + + // ngf can often oscillate between ready and error, so we wait for a stable readiness in ngf + var podNames []string + Eventually( + func() bool { + podNames, err = framework.GetReadyNGFPodNames( + k8sClient, + ngfNamespace, + releaseName, + timeoutConfig.GetStatusTimeout, + ) + return len(podNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). + Should(BeTrue()) + newNGFPodName := podNames[0] + + // expected behavior is when node is drained, new pods will be created. when the node is + // abruptly restarted, new pods are not created. + if drain { + Expect(newNGFPodName).ToNot(Equal(activeNGFPodName)) + activeNGFPodName = newNGFPodName + } else { + Expect(newNGFPodName).To(Equal(activeNGFPodName)) + } + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). 
+ Should(BeTrue()) + newNginxPodName := nginxPodNames[0] + + if drain { + Expect(newNginxPodName).ToNot(Equal(activeNginxPodName)) + activeNginxPodName = newNginxPodName + } else { + Expect(newNginxPodName).To(Equal(activeNginxPodName)) + } - if drain { - output, err := exec.Command( - "kubectl", - "drain", - kindNodeName, - "--ignore-daemonsets", - "--delete-emptydir-data", - ).CombinedOutput() + setUpPortForward(activeNginxPodName, ns.Name) - Expect(err).ToNot(HaveOccurred(), string(output)) + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, ns) - output, err = exec.Command("kubectl", "delete", "node", kindNodeName).CombinedOutput() - Expect(err).ToNot(HaveOccurred(), string(output)) + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } + + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } } - _, err = exec.Command("docker", "restart", containerName).CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + runRestartNodeWithDrainingTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { + runRestartNodeTest(teaURL, coffeeURL, files, ns, true) + } - // need to wait for docker container to restart and be running before polling for ready NGF Pods or else we will error - Eventually( - func() bool { - output, err := exec.Command( - "docker", - "inspect", - "-f", - "{{.State.Running}}", - containerName, - ).CombinedOutput() - return strings.TrimSpace(string(output)) == "true" && err == nil - }). - WithTimeout(timeoutConfig.CreateTimeout). - WithPolling(500 * time.Millisecond). 
- Should(BeTrue()) - - // ngf can often oscillate between ready and error, so we wait for a stable readiness in ngf - var podNames []string - Eventually( - func() bool { - podNames, err = framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout) - return len(podNames) == 1 && err == nil - }). - WithTimeout(timeoutConfig.CreateTimeout * 2). - WithPolling(500 * time.Millisecond). - MustPassRepeatedly(20). - Should(BeTrue()) - - ngfPodName := podNames[0] - Expect(ngfPodName).ToNot(BeEmpty()) - - if portFwdPort != 0 { - ports := []string{fmt.Sprintf("%d:80", ngfHTTPForwardedPort), fmt.Sprintf("%d:443", ngfHTTPSForwardedPort)} - portForwardStopCh = make(chan struct{}) - err = framework.PortForward(ctlr.GetConfigOrDie(), ngfNamespace, ngfPodName, ports, portForwardStopCh) - Expect(err).ToNot(HaveOccurred()) + runRestartNodeAbruptlyTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { + runRestartNodeTest(teaURL, coffeeURL, files, ns, false) } - checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, "", files, ns) - if errorLogs := getUnexpectedNginxErrorLogs(ngfPodName); errorLogs != "" { - Skip(fmt.Sprintf("NGINX has unexpected error logs: \n%s", errorLogs)) + getLeaderElectionLeaseHolderName := func() (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout) + defer cancel() + + var lease coordination.Lease + key := types.NamespacedName{Name: "ngf-test-nginx-gateway-fabric-leader-election", Namespace: ngfNamespace} + + if err := k8sClient.Get(ctx, key, &lease); err != nil { + return "", errors.New("could not retrieve leader election lease") + } + + if *lease.Spec.HolderIdentity == "" { + return "", errors.New("leader election lease holder identity is empty") + } + + return *lease.Spec.HolderIdentity, nil } -} -func runRecoveryTest(teaURL, coffeeURL, ngfPodName, containerName string, files []string, ns *core.Namespace) { - var ( - err error - leaseName 
string - ) + checkLeaderLeaseChange := func(originalLeaseName string) error { + leaseName, err := getLeaderElectionLeaseHolderName() + if err != nil { + return err + } - if containerName != nginxContainerName { - // Since we have already deployed resources and ran resourceManager.WaitForAppsToBeReadyWithPodCount earlier, - // we know that the applications are ready at this point. This could only be the case if NGF has written - // statuses, which could only be the case if NGF has the leader lease. Since there is only one instance - // of NGF in this test, we can be certain that this is the correct leaseholder name. - leaseName, err = getLeaderElectionLeaseHolderName() - Expect(err).ToNot(HaveOccurred()) + if originalLeaseName == leaseName { + return fmt.Errorf( + "expected originalLeaseName: %s, to not match current leaseName: %s", + originalLeaseName, + leaseName, + ) + } + + return nil } - restartContainer(ngfPodName, containerName) + BeforeAll(func() { + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + + activeNGFPodName = podNames[0] + + ns = core.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "graceful-recovery", + }, + } + + Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + activeNginxPodName = nginxPodNames[0] + + setUpPortForward(activeNginxPodName, ns.Name) + + if portFwdPort != 0 { + coffeeURL = fmt.Sprintf("%s:%d/coffee", baseHTTPURL, portFwdPort) + } + if portFwdHTTPSPort != 0 { + teaURL = fmt.Sprintf("%s:%d/tea", baseHTTPSURL, portFwdHTTPSPort) + } - if containerName != 
nginxContainerName { Eventually( func() error { - return checkLeaderLeaseChange(leaseName) + return checkForWorkingTraffic(teaURL, coffeeURL) }). - WithTimeout(timeoutConfig.GetLeaderLeaseTimeout). + WithTimeout(timeoutConfig.TestForTrafficTimeout). WithPolling(500 * time.Millisecond). Should(Succeed()) - } + }) - checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, containerName, files, ns) - if errorLogs := getUnexpectedNginxErrorLogs(ngfPodName); errorLogs != "" { - Skip(fmt.Sprintf("NGINX has unexpected error logs: \n%s", errorLogs)) - } -} + AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + }) -func restartContainer(ngfPodName, containerName string) { - var jobScript string - if containerName == "nginx" { - jobScript = "PID=$(pgrep -f \"nginx: master process\") && kill -9 $PID" - } else { - jobScript = "PID=$(pgrep -f \"/usr/bin/gateway\") && kill -9 $PID" - } + It("recovers when nginx container is restarted", func() { + restartNginxContainer(activeNginxPodName, ns.Name, nginxContainerName) - restartCount, err := getContainerRestartCount(ngfPodName, containerName) - Expect(err).ToNot(HaveOccurred()) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + activeNginxPodName = nginxPodNames[0] - job, err := runNodeDebuggerJob(ngfPodName, jobScript) - Expect(err).ToNot(HaveOccurred()) + setUpPortForward(activeNginxPodName, ns.Name) - Eventually( - func() error { - return checkContainerRestart(ngfPodName, containerName, restartCount) - }). - WithTimeout(timeoutConfig.ContainerRestartTimeout). - WithPolling(500 * time.Millisecond). 
- Should(Succeed()) - - // default propagation policy is metav1.DeletePropagationOrphan which does not delete the underlying - // pod created through the job after the job is deleted. Setting it to metav1.DeletePropagationBackground - // deletes the underlying pod after the job is deleted. - Expect(resourceManager.Delete( - []client.Object{job}, - client.PropagationPolicy(metav1.DeletePropagationBackground), - )).To(Succeed()) -} + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, &ns) -func checkContainerRestart(ngfPodName, containerName string, currentRestartCount int) error { - restartCount, err := getContainerRestartCount(ngfPodName, containerName) - if err != nil { - return err - } + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } - if restartCount != currentRestartCount+1 { - return fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", - restartCount, currentRestartCount+1) - } + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } + }) - return nil -} + It("recovers when NGF Pod is restarted", func() { + leaseName, err := getLeaderElectionLeaseHolderName() + Expect(err).ToNot(HaveOccurred()) -func checkForWorkingTraffic(teaURL, coffeeURL string) error { - if err := expectRequestToSucceed(teaURL, address, "URI: /tea"); err != nil { - return err - } - if err := expectRequestToSucceed(coffeeURL, address, "URI: /coffee"); err != nil { - return err - } - return nil -} + ngfPod, err := resourceManager.GetPod(ngfNamespace, activeNGFPodName) + Expect(err).ToNot(HaveOccurred()) -func checkForFailingTraffic(teaURL, coffeeURL string) error { - if err := expectRequestToFail(teaURL, address); err != nil { - return err - } - if err := expectRequestToFail(coffeeURL, address); err != nil { - return err - } - return nil -} 
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.DeleteTimeout) + defer cancel() + + Expect(k8sClient.Delete(ctx, ngfPod)).To(Succeed()) + + var newNGFPodNames []string + Eventually( + func() bool { + newNGFPodNames, err = framework.GetReadyNGFPodNames( + k8sClient, + ngfNamespace, + releaseName, + timeoutConfig.GetStatusTimeout, + ) + return len(newNGFPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). + Should(BeTrue()) + + newNGFPodName := newNGFPodNames[0] + Expect(newNGFPodName).ToNot(BeEmpty()) + + Expect(newNGFPodName).ToNot(Equal(activeNGFPodName)) + activeNGFPodName = newNGFPodName + + Eventually( + func() error { + return checkLeaderLeaseChange(leaseName) + }). + WithTimeout(timeoutConfig.GetLeaderLeaseTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, &ns) + + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } + + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } + }) + + It("recovers when drained node is restarted", func() { + runRestartNodeWithDrainingTest(teaURL, coffeeURL, files, &ns) + }) + + It("recovers when node is restarted abruptly", func() { + if *plusEnabled { + Skip(fmt.Sprintf("Skipping test when using NGINX Plus due to known issue:" + + " https://github.com/nginx/nginx-gateway-fabric/issues/3248")) + } + runRestartNodeAbruptlyTest(teaURL, coffeeURL, files, &ns) + }) +}) func expectRequestToSucceed(appURL, address string, responseBodyMessage string) error { status, body, err := framework.Get(appURL, address, timeoutConfig.RequestTimeout, nil, nil) @@ -324,48 +551,10 @@ func expectRequestToFail(appURL, address string) error { return 
nil } -func checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, containerName string, files []string, ns *core.Namespace) { - Eventually( - func() error { - return checkForWorkingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - - Eventually( - func() error { - return checkForFailingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReadyWithPodCount(ns.Name, 2)).To(Succeed()) - - Eventually( - func() error { - return checkForWorkingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - // When the NGINX process is killed, some errors are expected in the NGF logs while we wait for the - // NGINX container to be restarted. Therefore, we don't want to check the NGF logs for errors when the container - // we restarted was NGINX. 
- if containerName != nginxContainerName { - checkNGFContainerLogsForErrors(ngfPodName) - } -} - -func getNginxErrorLogs(ngfPodName string) string { +func getNginxErrorLogs(nginxPodName, namespace string) string { nginxLogs, err := resourceManager.GetPodLogs( - ngfNamespace, - ngfPodName, + namespace, + nginxPodName, &core.PodLogOptions{Container: nginxContainerName}, ) Expect(err).ToNot(HaveOccurred()) @@ -391,7 +580,7 @@ func getNginxErrorLogs(ngfPodName string) string { return errorLogs } -func getUnexpectedNginxErrorLogs(ngfPodName string) string { +func getUnexpectedNginxErrorLogs(nginxPodName, namespace string) string { expectedErrStrings := []string{ "connect() failed (111: Connection refused)", "could not be resolved (host not found) during usage report", @@ -403,7 +592,7 @@ func getUnexpectedNginxErrorLogs(ngfPodName string) string { unexpectedErrors := "" - errorLogs := getNginxErrorLogs(ngfPodName) + errorLogs := getNginxErrorLogs(nginxPodName, namespace) for _, line := range strings.Split(errorLogs, "\n") { if !slices.ContainsFunc(expectedErrStrings, func(s string) bool { @@ -416,8 +605,8 @@ func getUnexpectedNginxErrorLogs(ngfPodName string) string { return unexpectedErrors } -// checkNGFContainerLogsForErrors checks NGF container's logs for any possible errors. -func checkNGFContainerLogsForErrors(ngfPodName string) { +// getNGFErrorLogs gets NGF container error logs. 
+func getNGFErrorLogs(ngfPodName string) string { ngfLogs, err := resourceManager.GetPodLogs( ngfNamespace, ngfPodName, @@ -425,111 +614,28 @@ func checkNGFContainerLogsForErrors(ngfPodName string) { ) Expect(err).ToNot(HaveOccurred()) - for _, line := range strings.Split(ngfLogs, "\n") { - Expect(line).ToNot(ContainSubstring("\"level\":\"error\""), line) - } -} - -func checkLeaderLeaseChange(originalLeaseName string) error { - leaseName, err := getLeaderElectionLeaseHolderName() - if err != nil { - return err - } - - if originalLeaseName == leaseName { - return fmt.Errorf("expected originalLeaseName: %s, to not match current leaseName: %s", originalLeaseName, leaseName) - } - - return nil -} - -func getLeaderElectionLeaseHolderName() (string, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var lease coordination.Lease - key := types.NamespacedName{Name: "ngf-test-nginx-gateway-fabric-leader-election", Namespace: ngfNamespace} - - if err := k8sClient.Get(ctx, key, &lease); err != nil { - return "", errors.New("could not retrieve leader election lease") - } - - if *lease.Spec.HolderIdentity == "" { - return "", errors.New("leader election lease holder identity is empty") - } - - return *lease.Spec.HolderIdentity, nil -} - -func getContainerRestartCount(ngfPodName, containerName string) (int, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var ngfPod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ngfNamespace, Name: ngfPodName}, &ngfPod); err != nil { - return 0, fmt.Errorf("error retrieving NGF Pod: %w", err) - } + errorLogs := "" - var restartCount int - for _, containerStatus := range ngfPod.Status.ContainerStatuses { - if containerStatus.Name == containerName { - restartCount = int(containerStatus.RestartCount) + for _, line := range strings.Split(ngfLogs, "\n") { + if strings.Contains(line, 
"\"level\":\"error\"") { + errorLogs += line + "\n" + break } } - return restartCount, nil -} - -func getNodeNames() ([]string, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - var nodes core.NodeList - - if err := k8sClient.List(ctx, &nodes); err != nil { - return nil, fmt.Errorf("error listing nodes: %w", err) - } - - names := make([]string, 0, len(nodes.Items)) - - for _, node := range nodes.Items { - names = append(names, node.Name) - } - - return names, nil + return errorLogs } -func runNodeDebuggerJob(ngfPodName, jobScript string) (*v1.Job, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var ngfPod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ngfNamespace, Name: ngfPodName}, &ngfPod); err != nil { - return nil, fmt.Errorf("error retrieving NGF Pod: %w", err) - } - - b, err := resourceManager.GetFileContents("graceful-recovery/node-debugger-job.yaml") - if err != nil { - return nil, fmt.Errorf("error processing node debugger job file: %w", err) - } - - job := &v1.Job{} - if err = yaml.Unmarshal(b.Bytes(), job); err != nil { - return nil, fmt.Errorf("error with yaml unmarshal: %w", err) - } - - job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] = ngfPod.Spec.NodeName - if len(job.Spec.Template.Spec.Containers) != 1 { - return nil, fmt.Errorf( - "expected node debugger job to contain one container, actual number: %d", - len(job.Spec.Template.Spec.Containers), - ) - } - job.Spec.Template.Spec.Containers[0].Args = []string{jobScript} - job.Namespace = ngfNamespace +// checkNGFContainerLogsForErrors checks NGF container's logs for any possible errors. 
+func checkNGFContainerLogsForErrors(ngfPodName string) { + ngfLogs, err := resourceManager.GetPodLogs( + ngfNamespace, + ngfPodName, + &core.PodLogOptions{Container: ngfContainerName}, + ) + Expect(err).ToNot(HaveOccurred()) - if err = resourceManager.Apply([]client.Object{job}); err != nil { - return nil, fmt.Errorf("error in applying job: %w", err) + for _, line := range strings.Split(ngfLogs, "\n") { + Expect(line).ToNot(ContainSubstring("\"level\":\"error\""), line) } - - return job, nil } diff --git a/tests/suite/longevity_test.go b/tests/suite/longevity_test.go index c768c71cb7..182c0fd676 100644 --- a/tests/suite/longevity_test.go +++ b/tests/suite/longevity_test.go @@ -82,6 +82,7 @@ var _ = Describe("Longevity", Label("longevity-setup", "longevity-teardown"), fu Expect(writeTrafficResults(resultsFile, homeDir, "coffee.txt", "HTTP")).To(Succeed()) Expect(writeTrafficResults(resultsFile, homeDir, "tea.txt", "HTTPS")).To(Succeed()) + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) }) diff --git a/tests/suite/manifests/clientsettings/ignored-gateway.yaml b/tests/suite/manifests/clientsettings/ignored-gateway.yaml deleted file mode 100644 index 74d8317b01..0000000000 --- a/tests/suite/manifests/clientsettings/ignored-gateway.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: Gateway -metadata: - name: ignored-gateway -spec: - gatewayClassName: nginx - listeners: - - name: http - port: 80 - protocol: HTTP - hostname: "*.example.com" diff --git a/tests/suite/manifests/clientsettings/invalid-csp.yaml b/tests/suite/manifests/clientsettings/invalid-csp.yaml deleted file mode 100644 index cedfb52e46..0000000000 --- a/tests/suite/manifests/clientsettings/invalid-csp.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: gateway.nginx.org/v1alpha1 -kind: ClientSettingsPolicy -metadata: - name: 
invalid-csp -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: ignored-gateway - body: - maxSize: 10m - timeout: 30s - keepAlive: - requests: 100 - time: 5s - timeout: - server: 2s - header: 1s diff --git a/tests/suite/manifests/clientsettings/invalid-route-csp.yaml b/tests/suite/manifests/clientsettings/invalid-route-csp.yaml new file mode 100644 index 0000000000..e856d6e5e9 --- /dev/null +++ b/tests/suite/manifests/clientsettings/invalid-route-csp.yaml @@ -0,0 +1,33 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: invalid-route +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /invalid + headers: + - name: host_name + value: v2 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.nginx.org/v1alpha1 +kind: ClientSettingsPolicy +metadata: + name: invalid-route-csp +spec: + targetRef: + group: gateway.networking.k8s.io + kind: HTTPRoute + name: invalid-route + keepAlive: + requests: 200 diff --git a/tests/suite/manifests/reconfig/cafe-routes.yaml b/tests/suite/manifests/reconfig/cafe-routes.yaml index 006a8eba92..454d093892 100644 --- a/tests/suite/manifests/reconfig/cafe-routes.yaml +++ b/tests/suite/manifests/reconfig/cafe-routes.yaml @@ -5,7 +5,7 @@ metadata: spec: parentRefs: - name: gateway - namespace: default + namespace: reconfig sectionName: http hostnames: - "cafe.example.com" @@ -23,7 +23,7 @@ metadata: spec: parentRefs: - name: gateway - namespace: default + namespace: reconfig sectionName: https hostnames: - "cafe.example.com" @@ -43,8 +43,8 @@ metadata: spec: parentRefs: - name: gateway + namespace: reconfig sectionName: https - namespace: default hostnames: - "cafe.example.com" rules: diff --git a/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml b/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml index d9a0381b8e..ea19a27470 100644 --- 
a/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml +++ b/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml @@ -3,24 +3,26 @@ nginxGateway: preStop: exec: command: - - /usr/bin/gateway - - sleep - - --duration=40s + - /usr/bin/gateway + - sleep + - --duration=40s + terminationGracePeriodSeconds: 50 -nginx: - lifecycle: - preStop: - exec: - command: - - /bin/sleep - - "40" - -terminationGracePeriodSeconds: 50 -affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - app.kubernetes.io/name: nginx-gateway +nginx: + pod: + terminationGracePeriodSeconds: 50 + container: + lifecycle: + preStop: + exec: + command: + - /bin/sleep + - "40" + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: gateway-nginx diff --git a/tests/suite/manifests/scale/zero-downtime/values.yaml b/tests/suite/manifests/scale/zero-downtime/values.yaml index b4de7a5528..06f18b79a7 100644 --- a/tests/suite/manifests/scale/zero-downtime/values.yaml +++ b/tests/suite/manifests/scale/zero-downtime/values.yaml @@ -6,13 +6,15 @@ nginxGateway: - /usr/bin/gateway - sleep - --duration=40s + terminationGracePeriodSeconds: 50 nginx: - lifecycle: - preStop: - exec: - command: - - /bin/sleep - - "40" - -terminationGracePeriodSeconds: 50 + pod: + terminationGracePeriodSeconds: 50 + container: + lifecycle: + preStop: + exec: + command: + - /bin/sleep + - "40" diff --git a/tests/suite/manifests/tracing/nginxproxy.yaml b/tests/suite/manifests/tracing/nginxproxy.yaml deleted file mode 100644 index ed1f621047..0000000000 --- a/tests/suite/manifests/tracing/nginxproxy.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: gateway.nginx.org/v1alpha1 -kind: NginxProxy -metadata: - name: nginx-proxy -spec: - telemetry: - exporter: - endpoint: 
otel-collector-opentelemetry-collector.collector.svc:4317 - serviceName: my-test-svc - spanAttributes: - - key: testkey1 - value: testval1 diff --git a/tests/suite/nginxgateway_test.go b/tests/suite/nginxgateway_test.go index 1129310fab..90e1edb76c 100644 --- a/tests/suite/nginxgateway_test.go +++ b/tests/suite/nginxgateway_test.go @@ -98,7 +98,7 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f k8sClient, ngfNamespace, releaseName, - timeoutConfig.GetTimeout, + timeoutConfig.GetStatusTimeout, ) if err != nil { return "", err @@ -243,10 +243,7 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f return false } - return strings.Contains( - logs, - "\"current\":\"debug\",\"msg\":\"Log level changed\",\"prev\":\"info\"", - ) + return strings.Contains(logs, "\"level\":\"debug\"") }).WithTimeout(timeoutConfig.GetTimeout). WithPolling(500 * time.Millisecond). Should(BeTrue()) diff --git a/tests/suite/reconfig_test.go b/tests/suite/reconfig_test.go index 7503aef764..a28596e5ad 100644 --- a/tests/suite/reconfig_test.go +++ b/tests/suite/reconfig_test.go @@ -107,7 +107,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r return nil } - createResourcesGWLast := func(resourceCount int) { + createResources := func(resourceCount int) { ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout*5) defer cancel() @@ -140,44 +140,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r } Expect(resourceManager.WaitForPodsToBeReady(ctx, ns.Name)).To(Succeed()) } - - Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) - } - - createResourcesRoutesLast := func(resourceCount int) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout*5) - defer cancel() - - for i := 1; i <= resourceCount; i++ { - ns := core.Namespace{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "namespace" + strconv.Itoa(i), - }, - } - Expect(k8sClient.Create(ctx, &ns)).To(Succeed()) - } - - Expect(createUniqueResources(resourceCount, "manifests/reconfig/cafe.yaml")).To(Succeed()) - - for i := 1; i <= resourceCount; i++ { - ns := core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "namespace" + strconv.Itoa(i), - }, - } - Expect(resourceManager.WaitForPodsToBeReady(ctx, ns.Name)).To(Succeed()) - } - - Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles( - []string{ - "reconfig/cafe-secret.yaml", - "reconfig/reference-grant.yaml", - "reconfig/gateway.yaml", - }, - reconfigNamespace.Name)).To(Succeed()) - - Expect(createUniqueResources(resourceCount, "manifests/reconfig/cafe-routes.yaml")).To(Succeed()) } checkResourceCreation := func(resourceCount int) error { @@ -223,131 +185,64 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r return err } - getTimeStampFromLogLine := func(logLine string) string { - var timeStamp string - - timeStamp = strings.Split(logLine, "\"ts\":\"")[1] - // sometimes the log message will contain information on a "logger" followed by the "msg" - // while other times the "logger" will be omitted - timeStamp = strings.Split(timeStamp, "\",\"msg\"")[0] - timeStamp = strings.Split(timeStamp, "\",\"logger\"")[0] - - return timeStamp - } - - calculateTimeDifferenceBetweenLogLines := func(firstLine, secondLine string) (int, error) { - layout := time.RFC3339 - - firstTS := getTimeStampFromLogLine(firstLine) - secondTS := getTimeStampFromLogLine(secondLine) - - parsedTS1, err := time.Parse(layout, firstTS) - if err != nil { - return 0, err - } - - parsedTS2, err := time.Parse(layout, secondTS) - if err != nil { - return 0, err - } - - return int(parsedTS2.Sub(parsedTS1).Seconds()), nil - } + checkNginxConfIsPopulated := func(nginxPodName string, resourceCount int) error { + ctx, cancel := 
context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout*2) + defer cancel() - calculateTimeToReadyAverage := func(ngfLogs string) (string, error) { - var reconcilingLine, nginxReloadLine string - const maxCount = 5 - - var times [maxCount]int - var count int - - // parse the logs until it reaches a reconciling log line for a gateway resource, then it compares that - // timestamp to the next NGINX configuration update. When it reaches the NGINX configuration update line, - // it will reset the reconciling log line and set it to the next reconciling log line. - for _, line := range strings.Split(ngfLogs, "\n") { - if reconcilingLine == "" && - strings.Contains(line, "Reconciling the resource\",\"controller\"") && - strings.Contains(line, "\"controllerGroup\":\"gateway.networking.k8s.io\"") { - reconcilingLine = line + index := 1 + conf, _ := resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + for index <= resourceCount { + namespace := "namespace" + strconv.Itoa(resourceCount) + expUpstream := framework.ExpectedNginxField{ + Directive: "upstream", + Value: namespace + "_coffee" + namespace + "_80", + File: "http.conf", } - if strings.Contains(line, "NGINX configuration was successfully updated") && reconcilingLine != "" { - nginxReloadLine = line - - timeDifference, err := calculateTimeDifferenceBetweenLogLines(reconcilingLine, nginxReloadLine) - if err != nil { - return "", err - } - reconcilingLine = "" - - times[count] = timeDifference - count++ - if count == maxCount-1 { - break + // each call to ValidateNginxFieldExists takes about 1ms + if err := framework.ValidateNginxFieldExists(conf, expUpstream); err != nil { + select { + case <-ctx.Done(): + return fmt.Errorf("error validating nginx conf was generated in "+namespace+": %w", err.Error()) + default: + // each call to GetNginxConfig takes about 70ms + conf, _ = resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + 
continue } } - } - - var sum float64 - for _, time := range times { - sum += float64(time) - } - - avgTime := sum / float64(count+1) - if avgTime < 1 { - return "< 1", nil + index++ } - return strconv.FormatFloat(avgTime, 'f', -1, 64), nil + return nil } - calculateTimeToReadyTotal := func(ngfLogs, startingLogSubstring string) (string, error) { - var firstLine, lastLine string - for _, line := range strings.Split(ngfLogs, "\n") { - if firstLine == "" && strings.Contains(line, startingLogSubstring) { - firstLine = line - } - - if strings.Contains(line, "NGINX configuration was successfully updated") { - lastLine = line - } - } + calculateTimeToReadyTotal := func(nginxPodName string, startTime time.Time, resourceCount int) string { + Expect(checkNginxConfIsPopulated(nginxPodName, resourceCount)).To(Succeed()) + stopTime := time.Now() - timeToReadyTotal, err := calculateTimeDifferenceBetweenLogLines(firstLine, lastLine) - if err != nil { - return "", err - } + stringTimeToReadyTotal := strconv.Itoa(int(stopTime.Sub(startTime).Seconds())) - stringTimeToReadyTotal := strconv.Itoa(timeToReadyTotal) if stringTimeToReadyTotal == "0" { stringTimeToReadyTotal = "< 1" } - return stringTimeToReadyTotal, nil + return stringTimeToReadyTotal } - deployNGFReturnsNGFPodNameAndStartTime := func() (string, time.Time) { - var startTime time.Time - + collectMetrics := func( + resourceCount int, + ngfPodName string, + startTime time.Time, + ) reconfigTestResults { getStartTime := func() time.Time { return startTime } modifyStartTime := func() { startTime = startTime.Add(500 * time.Millisecond) } - cfg := getDefaultSetupCfg() - cfg.nfr = true - setup(cfg) - - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - ngfPodName := podNames[0] - startTime = time.Now() - queries := []string{ 
fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), fmt.Sprintf(`container_cpu_usage_seconds_total{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -361,16 +256,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r ).WithTimeout(metricExistTimeout).WithPolling(metricExistPolling).Should(Succeed()) } - return ngfPodName, startTime - } - - collectMetrics := func( - testDescription string, - resourceCount int, - timeToReadyStartingLogSubstring string, - ngfPodName string, - startTime time.Time, - ) { time.Sleep(2 * scrapeInterval) endTime := time.Now() @@ -388,12 +273,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r getEndTime := func() time.Time { return endTime } noOpModifier := func() {} - queries := []string{ - fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), - // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), - } - for _, q := range queries { Eventually( framework.CreateMetricExistChecker( @@ -406,16 +285,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r } checkNGFContainerLogsForErrors(ngfPodName) - nginxErrorLogs := getNginxErrorLogs(ngfPodName) - - reloadCount, err := framework.GetReloadCount(promInstance, ngfPodName) - Expect(err).ToNot(HaveOccurred()) - - reloadAvgTime, err := framework.GetReloadAvgTime(promInstance, ngfPodName) - Expect(err).ToNot(HaveOccurred()) - - reloadBuckets, err := framework.GetReloadBuckets(promInstance, 
ngfPodName) - Expect(err).ToNot(HaveOccurred()) eventsCount, err := framework.GetEventsCount(promInstance, ngfPodName) Expect(err).ToNot(HaveOccurred()) @@ -426,158 +295,156 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r eventsBuckets, err := framework.GetEventsBuckets(promInstance, ngfPodName) Expect(err).ToNot(HaveOccurred()) - logs, err := resourceManager.GetPodLogs(ngfNamespace, ngfPodName, &core.PodLogOptions{ - Container: "nginx-gateway", - }) - Expect(err).ToNot(HaveOccurred()) - - // FIXME (bjee19): https://github.com/nginx/nginx-gateway-fabric/issues/2374 - // Find a way to calculate time to ready metrics without having to rely on specific log lines. - timeToReadyTotal, err := calculateTimeToReadyTotal(logs, timeToReadyStartingLogSubstring) - Expect(err).ToNot(HaveOccurred()) - - timeToReadyAvgSingle, err := calculateTimeToReadyAverage(logs) - Expect(err).ToNot(HaveOccurred()) - results := reconfigTestResults{ - TestDescription: testDescription, - EventsBuckets: eventsBuckets, - ReloadBuckets: reloadBuckets, - NumResources: resourceCount, - TimeToReadyTotal: timeToReadyTotal, - TimeToReadyAvgSingle: timeToReadyAvgSingle, - NGINXReloads: int(reloadCount), - NGINXReloadAvgTime: int(reloadAvgTime), - NGINXErrorLogs: nginxErrorLogs, - EventsCount: int(eventsCount), - EventsAvgTime: int(eventsAvgTime), + EventsBuckets: eventsBuckets, + NumResources: resourceCount, + EventsCount: int(eventsCount), + EventsAvgTime: int(eventsAvgTime), } - err = writeReconfigResults(outFile, results) - Expect(err).ToNot(HaveOccurred()) + return results } When("resources exist before startup", func() { testDescription := "Test 1: Resources exist before startup" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Starting NGINX Gateway Fabric" - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - ngfPodName, startTime := 
deployNGFReturnsNGFPodNameAndStartTime() - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Starting NGINX Gateway Fabric" - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) + timeToReadyDescription := "From when NGF starts to when the NGINX configuration is fully configured" + DescribeTable(testDescription, + func(resourceCount int) { + createResources(resourceCount) + Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) + Expect(checkResourceCreation(resourceCount)).To(Succeed()) + + cfg := getDefaultSetupCfg() + cfg.nfr = true + setup(cfg) + + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + ngfPodName := podNames[0] + startTime := time.Now() + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames( + k8sClient, + reconfigNamespace.Name, + timeoutConfig.GetStatusTimeout, + ) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + timeToReadyTotal := calculateTimeToReadyTotal(nginxPodName, startTime, resourceCount) + + nginxErrorLogs := getNginxErrorLogs(nginxPodNames[0], reconfigNamespace.Name) + + results := collectMetrics( + resourceCount, + ngfPodName, + startTime, + ) + + results.NGINXErrorLogs = nginxErrorLogs + results.TimeToReadyTotal = timeToReadyTotal + results.TestDescription = testDescription + results.TimeToReadyDescription = timeToReadyDescription + + err = writeReconfigResults(outFile, results) + Expect(err).ToNot(HaveOccurred()) + }, + Entry("gathers metrics after creating 30 resources", 30), + Entry("gathers metrics after creating 150 resources", 150), + ) }) When("NGF and Gateway resource are deployed first", func() { - testDescription := "Test 2: Start NGF, deploy Gateway, create many resources attached to GW" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"httproute\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesRoutesLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"httproute\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesRoutesLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - }) - - When("NGF and resources are deployed first", func() { - testDescription := "Test 3: Start NGF, create many 
resources attached to a Gateway, deploy the Gateway" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"gateway\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"gateway\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) + testDescription := "Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, " + + "create many resources attached to GW" + timeToReadyDescription := "From when NGINX receives the first configuration created by NGF to " + + "when the NGINX configuration is fully configured" + DescribeTable(testDescription, + func(resourceCount int) { + cfg := getDefaultSetupCfg() + cfg.nfr = true + setup(cfg) + + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + ngfPodName := podNames[0] + + Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames( + k8sClient, + 
reconfigNamespace.Name, + timeoutConfig.GetStatusTimeout, + ) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + // this checks if NGF has established a connection with agent and sent over the first nginx conf + Eventually( + func() bool { + conf, _ := resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + // a default upstream NGF creates + defaultUpstream := framework.ExpectedNginxField{ + Directive: "upstream", + Value: "invalid-backend-ref", + File: "http.conf", + } + + return framework.ValidateNginxFieldExists(conf, defaultUpstream) == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + startTime := time.Now() + + createResources(resourceCount) + Expect(checkResourceCreation(resourceCount)).To(Succeed()) + + timeToReadyTotal := calculateTimeToReadyTotal(nginxPodName, startTime, resourceCount) + + nginxErrorLogs := getNginxErrorLogs(nginxPodName, reconfigNamespace.Name) + + results := collectMetrics( + resourceCount, + ngfPodName, + startTime, + ) + + results.NGINXErrorLogs = nginxErrorLogs + results.TimeToReadyTotal = timeToReadyTotal + results.TestDescription = testDescription + results.TimeToReadyDescription = timeToReadyDescription + + err = writeReconfigResults(outFile, results) + Expect(err).ToNot(HaveOccurred()) + }, + Entry("gathers metrics after creating 30 resources", 30), + Entry("gathers metrics after creating 150 resources", 150), + ) }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, reconfigNamespace.Name) + Expect(cleanupResources()).Should(Succeed()) teardown(releaseName) }) @@ -595,32 +462,23 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r }) type reconfigTestResults struct { - TestDescription string - TimeToReadyTotal string - TimeToReadyAvgSingle string - NGINXErrorLogs 
string - EventsBuckets []framework.Bucket - ReloadBuckets []framework.Bucket - NumResources int - NGINXReloads int - NGINXReloadAvgTime int - EventsCount int - EventsAvgTime int + TestDescription string + TimeToReadyTotal string + TimeToReadyDescription string + NGINXErrorLogs string + EventsBuckets []framework.Bucket + NumResources int + EventsCount int + EventsAvgTime int } const reconfigResultTemplate = ` ## {{ .TestDescription }} - NumResources {{ .NumResources }} -### Reloads and Time to Ready +### Time to Ready +Time To Ready Description: {{ .TimeToReadyDescription }} - TimeToReadyTotal: {{ .TimeToReadyTotal }}s -- TimeToReadyAvgSingle: {{ .TimeToReadyAvgSingle }}s -- NGINX Reloads: {{ .NGINXReloads }} -- NGINX Reload Average Time: {{ .NGINXReloadAvgTime }}ms -- Reload distribution: -{{- range .ReloadBuckets }} - - {{ .Le }}ms: {{ .Val }} -{{- end }} ### Event Batch Processing diff --git a/tests/suite/sample_test.go b/tests/suite/sample_test.go index bd883ae710..191d20134d 100644 --- a/tests/suite/sample_test.go +++ b/tests/suite/sample_test.go @@ -17,29 +17,40 @@ import ( ) var _ = Describe("Basic test example", Label("functional"), func() { - files := []string{ - "hello-world/apps.yaml", - "hello-world/gateway.yaml", - "hello-world/routes.yaml", - } + var ( + files = []string{ + "hello-world/apps.yaml", + "hello-world/gateway.yaml", + "hello-world/routes.yaml", + } - var ns core.Namespace + namespace = "helloworld" + ) BeforeEach(func() { - ns = core.Namespace{ + ns := &core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "helloworld", + Name: namespace, }, } - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) + 
Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) }) AfterEach(func() { - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() + + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) It("sends traffic", func() { diff --git a/tests/suite/scale_test.go b/tests/suite/scale_test.go index 4dadb97d60..8a04a088e7 100644 --- a/tests/suite/scale_test.go +++ b/tests/suite/scale_test.go @@ -119,31 +119,17 @@ var _ = Describe("Scale test", Ordered, Label("nfr", "scale"), func() { type scaleTestResults struct { Name string EventsBuckets []framework.Bucket - ReloadBuckets []framework.Bucket EventsAvgTime int EventsCount int NGFContainerRestarts int NGFErrors int NginxContainerRestarts int NginxErrors int - ReloadAvgTime int - ReloadCount int - ReloadErrsCount int } const scaleResultTemplate = ` ## Test {{ .Name }} -### Reloads - -- Total: {{ .ReloadCount }} -- Total Errors: {{ .ReloadErrsCount }} -- Average Time: {{ .ReloadAvgTime }}ms -- Reload distribution: -{{- range .ReloadBuckets }} - - {{ .Le }}ms: {{ .Val }} -{{- end }} - ### Event Batch Processing - Total: {{ .EventsCount }} @@ -176,12 +162,14 @@ The logs are attached only if there are errors. 
} checkLogErrors := func( - containerName string, + containerName, + podName, + namespace, + fileName string, substrings []string, ignoredSubstrings []string, - fileName string, ) int { - logs, err := resourceManager.GetPodLogs(ngfNamespace, ngfPodName, &core.PodLogOptions{ + logs, err := resourceManager.GetPodLogs(namespace, podName, &core.PodLogOptions{ Container: containerName, }) Expect(err).ToNot(HaveOccurred()) @@ -237,7 +225,7 @@ The logs are attached only if there are errors. fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), fmt.Sprintf(`container_cpu_usage_seconds_total{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -280,7 +268,7 @@ The logs are attached only if there are errors. queries = []string{ fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -337,18 +325,6 @@ The logs are attached only if there are errors. 
Expect(os.Remove(cpuCSV)).To(Succeed()) - reloadCount, err := framework.GetReloadCountWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadErrsCount, err := framework.GetReloadErrsCountWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadAvgTime, err := framework.GetReloadAvgTimeWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadBuckets, err := framework.GetReloadBucketsWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - eventsCount, err := framework.GetEventsCountWithStartTime(promInstance, ngfPodName, startTime) Expect(err).ToNot(HaveOccurred()) @@ -362,43 +338,53 @@ The logs are attached only if there are errors. ngfErrors := checkLogErrors( "nginx-gateway", + ngfPodName, + ngfNamespace, + filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "ngf", *plusEnabled)), []string{"error"}, []string{`"logger":"usageReporter`}, // ignore usageReporter errors - filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "ngf", *plusEnabled)), ) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName := nginxPodNames[0] + nginxErrors := checkLogErrors( "nginx", + nginxPodName, + namespace, + filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "nginx", *plusEnabled)), []string{framework.ErrorNGINXLog, framework.EmergNGINXLog, framework.CritNGINXLog, framework.AlertNGINXLog}, nil, - filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "nginx", *plusEnabled)), ) // Check container restarts - pod, err := resourceManager.GetPod(ngfNamespace, ngfPodName) + ngfPod, err := resourceManager.GetPod(ngfNamespace, ngfPodName) + Expect(err).ToNot(HaveOccurred()) + + nginxPod, err := resourceManager.GetPod(namespace, 
nginxPodName) Expect(err).ToNot(HaveOccurred()) - findRestarts := func(name string) int { + findRestarts := func(containerName string, pod *core.Pod) int { for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.Name == name { + if containerStatus.Name == containerName { return int(containerStatus.RestartCount) } } - Fail(fmt.Sprintf("container %s not found", name)) + Fail(fmt.Sprintf("container %s not found", containerName)) return 0 } - ngfRestarts := findRestarts("nginx-gateway") - nginxRestarts := findRestarts("nginx") + ngfRestarts := findRestarts("nginx-gateway", ngfPod) + nginxRestarts := findRestarts("nginx", nginxPod) // Write results results := scaleTestResults{ Name: testName, - ReloadCount: int(reloadCount), - ReloadErrsCount: int(reloadErrsCount), - ReloadAvgTime: int(reloadAvgTime), - ReloadBuckets: reloadBuckets, EventsCount: int(eventsCount), EventsAvgTime: int(eventsAvgTime), EventsBuckets: eventsBuckets, @@ -428,6 +414,22 @@ The logs are attached only if there are errors. for i := range len(objects.ScaleIterationGroups) { Expect(resourceManager.Apply(objects.ScaleIterationGroups[i])).To(Succeed()) + if i == 0 { + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + } + var url string if protocol == "http" && portFwdPort != 0 { url = fmt.Sprintf("%s://%d.example.com:%d", protocol, i, portFwdPort) @@ -441,7 +443,7 @@ The logs are attached only if there are errors. 
Eventually( framework.CreateResponseChecker(url, address, timeoutConfig.RequestTimeout), - ).WithTimeout(5 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) + ).WithTimeout(6 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) ttr := time.Since(startCheck) @@ -466,6 +468,21 @@ The logs are attached only if there are errors. Expect(resourceManager.ApplyFromFiles(upstreamsManifests, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + var url string if portFwdPort != 0 { url = fmt.Sprintf("http://hello.example.com:%d", portFwdPort) @@ -598,6 +615,21 @@ The logs are attached only if there are errors. Expect(resourceManager.ApplyFromFiles(matchesManifests, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + var port int if portFwdPort != 0 { port = portFwdPort @@ -611,7 +643,7 @@ The logs are attached only if there are errors. 
text := fmt.Sprintf("\n## Test %s\n\n", testName) - _, err := fmt.Fprint(outFile, text) + _, err = fmt.Fprint(outFile, text) Expect(err).ToNot(HaveOccurred()) run := func(t framework.Target) { @@ -650,8 +682,10 @@ The logs are attached only if there are errors. }) AfterEach(func() { - teardown(releaseName) + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) + teardown(releaseName) }) AfterAll(func() { @@ -678,13 +712,13 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim } var ( - outFile *os.File - resultsDir string - ngfDeploymentName string - ns core.Namespace - metricsCh chan *metricsResults + outFile *os.File + resultsDir string + ns core.Namespace + metricsCh chan *metricsResults - files = []string{ + numCoffeeAndTeaPods = 20 + files = []string{ "scale/zero-downtime/cafe.yaml", "scale/zero-downtime/cafe-secret.yaml", "scale/zero-downtime/gateway-1.yaml", @@ -825,12 +859,12 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim numReplicas int }{ { - name: "One NGF Pod runs per node", + name: "One NGINX Pod runs per node", valuesFile: "manifests/scale/zero-downtime/values-affinity.yaml", numReplicas: 12, // equals number of nodes }, { - name: "Multiple NGF Pods run per node", + name: "Multiple NGINX Pods run per node", valuesFile: "manifests/scale/zero-downtime/values.yaml", numReplicas: 24, // twice the number of nodes }, @@ -843,19 +877,33 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim cfg.nfr = true setup(cfg, "--values", test.valuesFile) - deploy, err := resourceManager.GetNGFDeployment(ngfNamespace, releaseName) - Expect(err).ToNot(HaveOccurred()) - ngfDeploymentName = deploy.GetName() - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) 
Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, ns.Name) + _, err = fmt.Fprintf(outFile, "\n## %s Test Results\n", test.name) Expect(err).ToNot(HaveOccurred()) }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + cleanUpPortForward() + teardown(releaseName) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) }) @@ -882,8 +930,8 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // scale NGF up one at a time for i := 2; i <= test.numReplicas; i++ { - Eventually(resourceManager.ScaleDeployment). - WithArguments(ngfNamespace, ngfDeploymentName, int32(i)). + Eventually(resourceManager.ScaleNginxDeployment). + WithArguments(ngfNamespace, releaseName, int32(i)). WithTimeout(timeoutConfig.UpdateTimeout). WithPolling(500 * time.Millisecond). 
Should(Succeed()) @@ -893,7 +941,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout) - Expect(resourceManager.WaitForPodsToBeReadyWithCount(ctx, ngfNamespace, i)).To(Succeed()) + Expect(resourceManager.WaitForPodsToBeReadyWithCount(ctx, ns.Name, i+numCoffeeAndTeaPods)).To(Succeed()) Expect(resourceManager.WaitForGatewayObservedGeneration(ctx, ns.Name, "gateway", i)).To(Succeed()) cancel() @@ -935,8 +983,8 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // scale NGF down one at a time currentGen := test.numReplicas for i := test.numReplicas - 1; i >= 1; i-- { - Eventually(resourceManager.ScaleDeployment). - WithArguments(ngfNamespace, ngfDeploymentName, int32(i)). + Eventually(resourceManager.ScaleNginxDeployment). + WithArguments(ngfNamespace, releaseName, int32(i)). WithTimeout(timeoutConfig.UpdateTimeout). WithPolling(500 * time.Millisecond). 
Should(Succeed()) @@ -1005,7 +1053,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // allow traffic flow to start time.Sleep(2 * time.Second) - Expect(resourceManager.ScaleDeployment(ngfNamespace, ngfDeploymentName, int32(test.numReplicas))).To(Succeed()) + Expect(resourceManager.ScaleNginxDeployment(ngfNamespace, releaseName, int32(test.numReplicas))).To(Succeed()) Expect(resourceManager.ApplyFromFiles([]string{"scale/zero-downtime/gateway-2.yaml"}, ns.Name)).To(Succeed()) checkGatewayListeners(3) @@ -1037,7 +1085,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // allow traffic flow to start time.Sleep(2 * time.Second) - Expect(resourceManager.ScaleDeployment(ngfNamespace, ngfDeploymentName, int32(1))).To(Succeed()) + Expect(resourceManager.ScaleNginxDeployment(ngfNamespace, releaseName, int32(1))).To(Succeed()) Expect(resourceManager.ApplyFromFiles([]string{"scale/zero-downtime/gateway-1.yaml"}, ns.Name)).To(Succeed()) checkGatewayListeners(2) diff --git a/tests/suite/scripts/longevity-wrk.sh b/tests/suite/scripts/longevity-wrk.sh index e7d3a6b23a..1165cfa6b5 100755 --- a/tests/suite/scripts/longevity-wrk.sh +++ b/tests/suite/scripts/longevity-wrk.sh @@ -1,6 +1,15 @@ #!/usr/bin/env bash -SVC_IP=$(kubectl -n nginx-gateway get svc ngf-longevity-nginx-gateway-fabric -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +while true; do + SVC_IP=$(kubectl -n longevity get svc gateway-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + if [[ -n $SVC_IP ]]; then + echo "Service IP assigned: $SVC_IP" + break + fi + + echo "Still waiting for nginx Service IP..." 
+ sleep 5 +done echo "${SVC_IP} cafe.example.com" | sudo tee -a /etc/hosts diff --git a/tests/suite/snippets_filter_test.go b/tests/suite/snippets_filter_test.go index 1edf2b2fe3..de6583d79b 100644 --- a/tests/suite/snippets_filter_test.go +++ b/tests/suite/snippets_filter_test.go @@ -10,7 +10,6 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" @@ -28,6 +27,8 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter } namespace = "snippets-filter" + + nginxPodName string ) BeforeAll(func() { @@ -40,9 +41,19 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -56,6 +67,7 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) Expect(resourceManager.DeleteFromFiles(snippetsFilter, namespace)).To(Succeed()) }) @@ -68,8 +80,11 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter for _, name := range snippetsFilterNames { nsname := types.NamespacedName{Name: name, Namespace: namespace} - err := waitForSnippetsFilterToBeAccepted(nsname) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("%s was not 
accepted", name)) + Eventually(checkForSnippetsFilterToBeAccepted). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500*time.Millisecond). + Should(Succeed(), fmt.Sprintf("%s was not accepted", name)) } }) @@ -104,13 +119,8 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter grpcRouteSuffix := fmt.Sprintf("%s_grpc-all-contexts.conf", namespace) BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) @@ -221,7 +231,11 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) nsname := types.NamespacedName{Name: "tea", Namespace: namespace} - Expect(waitForHTTPRouteToHaveGatewayNotProgrammedCond(nsname)).To(Succeed()) + Eventually(checkHTTPRouteToHaveGatewayNotProgrammedCond). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) }) @@ -232,116 +246,99 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) nsname := types.NamespacedName{Name: "soda", Namespace: namespace} - Expect(waitForHTTPRouteToHaveGatewayNotProgrammedCond(nsname)).To(Succeed()) + Eventually(checkHTTPRouteToHaveGatewayNotProgrammedCond). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) }) }) }) -func waitForHTTPRouteToHaveGatewayNotProgrammedCond(httpRouteNsName types.NamespacedName) error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout*2) +func checkHTTPRouteToHaveGatewayNotProgrammedCond(httpRouteNsName types.NamespacedName) error { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) defer cancel() GinkgoWriter.Printf( - "Waiting for HTTPRoute %q to have the condition Accepted/True/GatewayNotProgrammed\n", + "Checking for HTTPRoute %q to have the condition Accepted/True/GatewayNotProgrammed\n", httpRouteNsName, ) - return wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(ctx context.Context) (bool, error) { - var hr v1.HTTPRoute - var err error + var hr v1.HTTPRoute + var err error - if err = k8sClient.Get(ctx, httpRouteNsName, &hr); err != nil { - return false, err - } + if err = k8sClient.Get(ctx, httpRouteNsName, &hr); err != nil { + return err + } - if len(hr.Status.Parents) == 0 { - return false, nil - } + if len(hr.Status.Parents) != 1 { + return fmt.Errorf("httproute has %d parent statuses, expected 1", len(hr.Status.Parents)) + } - if len(hr.Status.Parents) != 1 { - return false, fmt.Errorf("httproute has %d parent statuses, expected 1", len(hr.Status.Parents)) - } + parent := hr.Status.Parents[0] + if parent.Conditions == nil { + return fmt.Errorf("expected parent conditions to not be nil") + } - parent := hr.Status.Parents[0] - if parent.Conditions == nil { - return false, fmt.Errorf("expected parent conditions to not be nil") - } + cond := parent.Conditions[1] + if cond.Type != string(v1.GatewayConditionAccepted) { + return fmt.Errorf("expected condition type to be Accepted, got %s", cond.Type) + } - cond := parent.Conditions[1] - if cond.Type != string(v1.GatewayConditionAccepted) { - return false, fmt.Errorf("expected 
condition type to be Accepted, got %s", cond.Type) - } + if cond.Status != metav1.ConditionFalse { + return fmt.Errorf("expected condition status to be False, got %s", cond.Status) + } - if cond.Status != metav1.ConditionFalse { - return false, fmt.Errorf("expected condition status to be False, got %s", cond.Status) - } + if cond.Reason != string(conditions.RouteReasonGatewayNotProgrammed) { + return fmt.Errorf("expected condition reason to be GatewayNotProgrammed, got %s", cond.Reason) + } - if cond.Reason != string(conditions.RouteReasonGatewayNotProgrammed) { - return false, fmt.Errorf("expected condition reason to be GatewayNotProgrammed, got %s", cond.Reason) - } - return err == nil, err - }, - ) + return nil } -func waitForSnippetsFilterToBeAccepted(snippetsFilterNsNames types.NamespacedName) error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout) +func checkForSnippetsFilterToBeAccepted(snippetsFilterNsNames types.NamespacedName) error { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) defer cancel() GinkgoWriter.Printf( - "Waiting for SnippetsFilter %q to have the condition Accepted/True/Accepted\n", + "Checking for SnippetsFilter %q to have the condition Accepted/True/Accepted\n", snippetsFilterNsNames, ) - return wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(ctx context.Context) (bool, error) { - var sf ngfAPI.SnippetsFilter - var err error + var sf ngfAPI.SnippetsFilter + var err error - if err = k8sClient.Get(ctx, snippetsFilterNsNames, &sf); err != nil { - return false, err - } - - if len(sf.Status.Controllers) == 0 { - return false, nil - } + if err = k8sClient.Get(ctx, snippetsFilterNsNames, &sf); err != nil { + return err + } - if len(sf.Status.Controllers) != 1 { - return false, fmt.Errorf("snippetsFilter has %d controller statuses, expected 1", len(sf.Status.Controllers)) - } + if len(sf.Status.Controllers) != 1 { + 
return fmt.Errorf("snippetsFilter has %d controller statuses, expected 1", len(sf.Status.Controllers)) + } - status := sf.Status.Controllers[0] - if status.ControllerName != ngfControllerName { - return false, fmt.Errorf("expected controller name to be %s, got %s", ngfControllerName, status.ControllerName) - } + status := sf.Status.Controllers[0] + if status.ControllerName != ngfControllerName { + return fmt.Errorf("expected controller name to be %s, got %s", ngfControllerName, status.ControllerName) + } - condition := status.Conditions[0] - if condition.Type != string(ngfAPI.SnippetsFilterConditionTypeAccepted) { - return false, fmt.Errorf("expected condition type to be Accepted, got %s", condition.Type) - } + condition := status.Conditions[0] + if condition.Type != string(ngfAPI.SnippetsFilterConditionTypeAccepted) { + return fmt.Errorf("expected condition type to be Accepted, got %s", condition.Type) + } - if status.Conditions[0].Status != metav1.ConditionTrue { - return false, fmt.Errorf("expected condition status to be %s, got %s", metav1.ConditionTrue, condition.Status) - } + if status.Conditions[0].Status != metav1.ConditionTrue { + return fmt.Errorf("expected condition status to be %s, got %s", metav1.ConditionTrue, condition.Status) + } - if status.Conditions[0].Reason != string(ngfAPI.SnippetsFilterConditionReasonAccepted) { - return false, fmt.Errorf( - "expected condition reason to be %s, got %s", - ngfAPI.SnippetsFilterConditionReasonAccepted, - condition.Reason, - ) - } + if status.Conditions[0].Reason != string(ngfAPI.SnippetsFilterConditionReasonAccepted) { + return fmt.Errorf( + "expected condition reason to be %s, got %s", + ngfAPI.SnippetsFilterConditionReasonAccepted, + condition.Reason, + ) + } - return err == nil, err - }, - ) + return nil } diff --git a/tests/suite/system_suite_test.go b/tests/suite/system_suite_test.go index 684ad1d998..6a8174bdb9 100644 --- a/tests/suite/system_suite_test.go +++ b/tests/suite/system_suite_test.go @@ -70,20 
+70,21 @@ var ( var ( //go:embed manifests/* - manifests embed.FS - k8sClient client.Client - resourceManager framework.ResourceManager - portForwardStopCh chan struct{} - portFwdPort int - portFwdHTTPSPort int - timeoutConfig framework.TimeoutConfig - localChartPath string - address string - version string - chartVersion string - clusterInfo framework.ClusterInfo - skipNFRTests bool - logs string + manifests embed.FS + k8sClient client.Client + resourceManager framework.ResourceManager + portForwardStopCh chan struct{} + portFwdPort int + portFwdHTTPSPort int + timeoutConfig framework.TimeoutConfig + localChartPath string + address string + version string + chartVersion string + clusterInfo framework.ClusterInfo + skipNFRTests bool + logs string + nginxCrossplanePath string ) var formatNginxPlusEdgeImagePath = "us-docker.pkg.dev/%s/nginx-gateway-fabric/nginx-plus" @@ -171,6 +172,8 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { version = "edge" } + nginxCrossplanePath = "us-docker.pkg.dev/" + *gkeProject + "/nginx-gateway-fabric" + if !cfg.deploy { return } @@ -185,20 +188,34 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { ) Expect(err).ToNot(HaveOccurred()) Expect(podNames).ToNot(BeEmpty()) +} + +func setUpPortForward(nginxPodName, nginxNamespace string) { + var err error if *serviceType != "LoadBalancer" { ports := []string{fmt.Sprintf("%d:80", ngfHTTPForwardedPort), fmt.Sprintf("%d:443", ngfHTTPSForwardedPort)} portForwardStopCh = make(chan struct{}) - err = framework.PortForward(k8sConfig, installCfg.Namespace, podNames[0], ports, portForwardStopCh) + err = framework.PortForward(resourceManager.K8sConfig, nginxNamespace, nginxPodName, ports, portForwardStopCh) address = "127.0.0.1" portFwdPort = ngfHTTPForwardedPort portFwdHTTPSPort = ngfHTTPSForwardedPort } else { - address, err = resourceManager.GetLBIPAddress(installCfg.Namespace) + address, err = resourceManager.GetLBIPAddress(nginxNamespace) } Expect(err).ToNot(HaveOccurred()) } 
+// cleanUpPortForward closes the port forward channel and needs to be called before deleting any gateways or else +// the logs will be flooded with port forward errors. +func cleanUpPortForward() { + if portFwdPort != 0 { + close(portForwardStopCh) + portFwdPort = 0 + portFwdHTTPSPort = 0 + } +} + func createNGFInstallConfig(cfg setupConfig, extraInstallArgs ...string) framework.InstallationConfig { installCfg := framework.InstallationConfig{ ReleaseName: cfg.releaseName, @@ -252,12 +269,6 @@ func createNGFInstallConfig(cfg setupConfig, extraInstallArgs ...string) framewo } func teardown(relName string) { - if portFwdPort != 0 { - close(portForwardStopCh) - portFwdPort = 0 - portFwdHTTPSPort = 0 - } - cfg := framework.InstallationConfig{ ReleaseName: relName, Namespace: ngfNamespace, @@ -340,7 +351,7 @@ var _ = AfterSuite(func() { AddReportEntry("Events", events, ReportEntryVisibilityNever) logs = framework.GetLogs(resourceManager, ngfNamespace, releaseName) - AddReportEntry("Logs", logs, ReportEntryVisibilityNever) + AddReportEntry("NGF Logs", logs, ReportEntryVisibilityNever) labelFilter := GinkgoLabelFilter() if !strings.Contains(labelFilter, "longevity-setup") { diff --git a/tests/suite/telemetry_test.go b/tests/suite/telemetry_test.go index ba15f130f1..7c81b74342 100644 --- a/tests/suite/telemetry_test.go +++ b/tests/suite/telemetry_test.go @@ -12,6 +12,10 @@ import ( ) var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func() { + // To run the telemetry test, you must build NGF with the following values: + // TELEMETRY_ENDPOINT=otel-collector-opentelemetry-collector.collector.svc.cluster.local:4317 + // TELEMETRY_ENDPOINT_INSECURE = true + BeforeEach(func() { // Because NGF reports telemetry on start, we need to install the collector first.
@@ -22,10 +26,9 @@ var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func( // Install NGF // Note: the BeforeSuite call doesn't install NGF for 'telemetry' label - setup( - getDefaultSetupCfg(), - "--set", "nginxGateway.productTelemetry.enable=true", - ) + cfg := getDefaultSetupCfg() + cfg.telemetry = true + setup(cfg) }) AfterEach(func() { @@ -86,10 +89,12 @@ var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func( "GatewayAttachedClientSettingsPolicyCount: Int(0)", "RouteAttachedClientSettingsPolicyCount: Int(0)", "ObservabilityPolicyCount: Int(0)", - "NginxProxyCount: Int(0)", + "NginxProxyCount: Int(1)", "SnippetsFilterCount: Int(0)", "UpstreamSettingsPolicyCount: Int(0)", - "NGFReplicaCount: Int(1)", + "GatewayAttachedNpCount: Int(0)", + "NginxPodCount: Int(0)", + "ControlPlanePodCount: Int(1)", }, ) }) diff --git a/tests/suite/tracing_test.go b/tests/suite/tracing_test.go index e1d6aceff5..f56a083426 100644 --- a/tests/suite/tracing_test.go +++ b/tests/suite/tracing_test.go @@ -19,6 +19,7 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" "github.com/nginx/nginx-gateway-fabric/tests/framework" ) @@ -26,26 +27,58 @@ import ( // This test can be flaky when waiting to see traces show up in the collector logs. // Sometimes they get there right away, sometimes it takes 30 seconds. Retries were // added to attempt to mitigate the issue, but it didn't fix it 100%. 
-var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), func() { +var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "tracing"), func() { + // To run the tracing test, you must build NGF with the following values: + // TELEMETRY_ENDPOINT=otel-collector-opentelemetry-collector.collector.svc.cluster.local:4317 + // TELEMETRY_ENDPOINT_INSECURE = true + var ( files = []string{ "hello-world/apps.yaml", "hello-world/gateway.yaml", "hello-world/routes.yaml", } - nginxProxyFile = "tracing/nginxproxy.yaml" policySingleFile = "tracing/policy-single.yaml" policyMultipleFile = "tracing/policy-multiple.yaml" - ns core.Namespace + namespace = "helloworld" collectorPodName, helloURL, worldURL, helloworldURL string ) + updateNginxProxyTelemetrySpec := func(telemetry ngfAPIv1alpha2.Telemetry) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout) + defer cancel() + + key := types.NamespacedName{Name: "ngf-test-proxy-config", Namespace: "nginx-gateway"} + var nginxProxy ngfAPIv1alpha2.NginxProxy + Expect(k8sClient.Get(ctx, key, &nginxProxy)).To(Succeed()) + + nginxProxy.Spec.Telemetry = &telemetry + + Expect(k8sClient.Update(ctx, &nginxProxy)).To(Succeed()) + } + + BeforeAll(func() { + telemetry := ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("otel-collector-opentelemetry-collector.collector.svc:4317"), + }, + ServiceName: helpers.GetPointer("my-test-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{{ + Key: "testkey1", + Value: "testval1", + }}, + } + + updateNginxProxyTelemetrySpec(telemetry) + }) + + // BeforeEach is needed because FlakeAttempts do not re-run BeforeAll/AfterAll nodes BeforeEach(func() { - ns = core.Namespace{ + ns := &core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "helloworld", + Name: namespace, }, } @@ -55,9 +88,15 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu 
collectorPodName, err = framework.GetCollectorPodName(resourceManager) Expect(err).ToNot(HaveOccurred()) - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) url := "http://foo.example.com" helloURL = url + "/hello" @@ -71,44 +110,21 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) output, err := framework.UninstallCollector(resourceManager) Expect(err).ToNot(HaveOccurred(), string(output)) - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteFromFiles( - []string{nginxProxyFile, policySingleFile, policyMultipleFile}, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) - - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout) - defer cancel() - - key := types.NamespacedName{Name: gatewayClassName} - var gwClass gatewayv1.GatewayClass - Expect(k8sClient.Get(ctx, key, &gwClass)).To(Succeed()) + cleanUpPortForward() - gwClass.Spec.ParametersRef = nil - - Expect(k8sClient.Update(ctx, &gwClass)).To(Succeed()) + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteFromFiles( + []string{policySingleFile, policyMultipleFile}, namespace)).To(Succeed()) + 
Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) - updateGatewayClass := func() error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout) - defer cancel() - - key := types.NamespacedName{Name: gatewayClassName} - var gwClass gatewayv1.GatewayClass - if err := k8sClient.Get(ctx, key, &gwClass); err != nil { - return err - } - - gwClass.Spec.ParametersRef = &gatewayv1.ParametersReference{ - Group: ngfAPIv1alpha1.GroupName, - Kind: gatewayv1.Kind("NginxProxy"), - Name: "nginx-proxy", - } - - return k8sClient.Update(ctx, &gwClass) - } + AfterAll(func() { + updateNginxProxyTelemetrySpec(ngfAPIv1alpha2.Telemetry{}) + }) sendRequests := func(url string, count int) { for range count { @@ -168,11 +184,9 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu // install tracing configuration traceFiles := []string{ - nginxProxyFile, policySingleFile, } - Expect(resourceManager.ApplyFromFiles(traceFiles, ns.Name)).To(Succeed()) - Expect(updateGatewayClass()).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(traceFiles, namespace)).To(Succeed()) checkStatusAndTraces() @@ -192,11 +206,9 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu It("sends tracing spans for one policy attached to multiple routes", func() { // install tracing configuration traceFiles := []string{ - nginxProxyFile, policyMultipleFile, } - Expect(resourceManager.ApplyFromFiles(traceFiles, ns.Name)).To(Succeed()) - Expect(updateGatewayClass()).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(traceFiles, namespace)).To(Succeed()) checkStatusAndTraces() diff --git a/tests/suite/upgrade_test.go b/tests/suite/upgrade_test.go index d0dec2fc15..a62de97356 100644 --- a/tests/suite/upgrade_test.go +++ b/tests/suite/upgrade_test.go @@ -67,7 +67,12 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) 
Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - var err error + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], ns.Name) + resultsDir, err = framework.CreateResultsDir("ngf-upgrade", version) Expect(err).ToNot(HaveOccurred()) @@ -78,12 +83,16 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) resultsFile.Close() }) It("upgrades NGF with zero downtime", func() { + Skip("Skipping test until version 2.1.0 since 2.0.0 is a breaking change") nginxImage := *nginxImageRepository if *plusEnabled { nginxImage = *nginxPlusImageRepository diff --git a/tests/suite/upstream_settings_test.go b/tests/suite/upstream_settings_test.go index 23fceb768d..f2b02b1059 100644 --- a/tests/suite/upstream_settings_test.go +++ b/tests/suite/upstream_settings_test.go @@ -31,6 +31,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic namespace = "uspolicy" gatewayName = "gateway" + + nginxPodName string ) zoneSize := "512k" @@ -48,9 +50,20 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { 
+ framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -117,13 +130,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic var conf *framework.Payload BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) @@ -302,13 +310,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic var conf *framework.Payload BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) })