Test to confirm HCP cluster can accept and use user provided pull secrets #3637
@@ -0,0 +1,334 @@
// Copyright 2025 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"

	"github.com/Azure/ARO-HCP/test/util/framework"
	"github.com/Azure/ARO-HCP/test/util/labels"
	"github.com/Azure/ARO-HCP/test/util/verifiers"
)

var _ = Describe("Cluster Pull Secret Management", func() {
	BeforeEach(func() {
		// per test initialization
	})

	It("should be able to create an HCP cluster and manage pull secrets",
		labels.RequireNothing,
		labels.Critical,
		labels.Positive,
		labels.AroRpApiCompatible,
		func(ctx context.Context) {
			const (
				customerClusterName    = "pullsecret-hcp-cluster"
				testPullSecretHost     = "host.example.com"
				testPullSecretPassword = "my_password"
				testPullSecretEmail    = "[email protected]"
				pullSecretName         = "additional-pull-secret"
				pullSecretNamespace    = "kube-system"
			)
			tc := framework.NewTestContext()

			By("creating a resource group")
			resourceGroup, err := tc.NewResourceGroup(ctx, "pullsecret-test", tc.Location())
			Expect(err).NotTo(HaveOccurred())

			By("creating cluster parameters")
			clusterParams := framework.NewDefaultClusterParams()
			clusterParams.ClusterName = customerClusterName
			managedResourceGroupName := framework.SuffixName(*resourceGroup.Name, "-managed", 64)
			clusterParams.ManagedResourceGroupName = managedResourceGroupName

			By("creating customer resources")
			clusterParams, err = tc.CreateClusterCustomerResources(ctx,
				resourceGroup,
				clusterParams,
				map[string]interface{}{
					"persistTagValue": false,
				},
				TestArtifactsFS,
			)
			Expect(err).NotTo(HaveOccurred())

			By("Creating the cluster")
			err = tc.CreateHCPClusterFromParam(ctx,
				GinkgoLogr,
				*resourceGroup.Name,
				clusterParams,
				45*time.Minute,
			)
			Expect(err).NotTo(HaveOccurred())

			By("Creating the node pool")
			nodePoolParams := framework.NewDefaultNodePoolParams()
			nodePoolParams.NodePoolName = "np-1"
			nodePoolParams.ClusterName = customerClusterName
			nodePoolParams.Replicas = int32(2)
			err = tc.CreateNodePoolFromParam(ctx,
				*resourceGroup.Name,
				customerClusterName,
				nodePoolParams,
				15*time.Minute,
			)
			Expect(err).NotTo(HaveOccurred())

			By("getting credentials")
			adminRESTConfig, err := tc.GetAdminRESTConfigForHCPCluster(
				ctx,
				tc.Get20240610ClientFactoryOrDie(ctx).NewHcpOpenShiftClustersClient(),
				*resourceGroup.Name,
				customerClusterName,
				10*time.Minute,
			)
			Expect(err).NotTo(HaveOccurred())

			By("ensuring the cluster is viable")
			err = verifiers.VerifyHCPCluster(ctx, adminRESTConfig)
			Expect(err).NotTo(HaveOccurred())

			By("creating kubernetes client")
			kubeClient, err := kubernetes.NewForConfig(adminRESTConfig)
			Expect(err).NotTo(HaveOccurred())

			By("creating test pull secret")
			username := "test-user"
			auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + testPullSecretPassword))

			testPullSecret, err := framework.CreateTestDockerConfigSecret(
				testPullSecretHost,
				username,
				testPullSecretPassword,
				testPullSecretEmail,
				pullSecretName,
				pullSecretNamespace,
			)
			Expect(err).NotTo(HaveOccurred())

			By("creating the test pull secret in the cluster")
			_, err = kubeClient.CoreV1().Secrets(pullSecretNamespace).Create(ctx, testPullSecret, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())

			By("waiting for HCCO to merge the additional pull secret with the global pull secret")
			Eventually(func() error {
				return verifiers.VerifyPullSecretMergedIntoGlobal(testPullSecretHost).Verify(ctx, adminRESTConfig)
			}, 300*time.Second, 15*time.Second).Should(Succeed(), "additional pull secret should be merged into global-pull-secret by HCCO")
| By("verifying the DaemonSet for global pull secret synchronization is created") | ||
| Eventually(func() error { | ||
| return verifiers.VerifyGlobalPullSecretSyncer().Verify(ctx, adminRESTConfig) | ||
| }, 60*time.Second, 10*time.Second).Should(Succeed(), "global-pull-secret-syncer DaemonSet should be created") | ||
|
|
||
| By("verifying the pull secret was merged into the global pull secret") | ||
| err = verifiers.VerifyPullSecretAuthData( | ||
| "global-pull-secret", | ||
| pullSecretNamespace, | ||
| testPullSecretHost, | ||
| auth, | ||
| testPullSecretEmail, | ||
| ).Verify(ctx, adminRESTConfig) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("reading pull-secret file from aro-hcp-qe-pull-secret directory") | ||
| pullSecretFilePath := filepath.Join(tc.PullSecretPath(), "pull-secret") | ||
| pullSecretFileData, err := os.ReadFile(pullSecretFilePath) | ||
| Expect(err).NotTo(HaveOccurred(), "failed to read pull-secret file from %s", pullSecretFilePath) | ||
|
|
||
| By("parsing pull-secret file") | ||
| var pullSecretConfig framework.DockerConfigJSON | ||
| err = json.Unmarshal(pullSecretFileData, &pullSecretConfig) | ||
| Expect(err).NotTo(HaveOccurred(), "failed to parse pull-secret file") | ||
|
|
||
| By("extracting registry.redhat.io credentials") | ||
| const redhatRegistryHost = "registry.redhat.io" | ||
| redhatRegistryAuth, ok := pullSecretConfig.Auths[redhatRegistryHost] | ||
| Expect(ok).To(BeTrue(), "registry.redhat.io credentials not found in pull-secret file") | ||
|
|
||
| redhatRegistryAuthString := redhatRegistryAuth.Auth | ||
| redhatRegistryEmail := redhatRegistryAuth.Email | ||
|
|
||
| By("updating additional-pull-secret to add registry.redhat.io credentials") | ||
| // Get the current additional-pull-secret | ||
| currentSecret, err := kubeClient.CoreV1().Secrets(pullSecretNamespace).Get(ctx, pullSecretName, metav1.GetOptions{}) | ||
| Expect(err).NotTo(HaveOccurred(), "failed to get existing additional-pull-secret") | ||
|
|
||
| // Parse the current dockerconfigjson | ||
| var currentConfig framework.DockerConfigJSON | ||
| err = json.Unmarshal(currentSecret.Data[corev1.DockerConfigJsonKey], ¤tConfig) | ||
| Expect(err).NotTo(HaveOccurred(), "failed to parse current pull secret") | ||
|
|
||
| // Add registry.redhat.io credentials to the existing auths | ||
| currentConfig.Auths[redhatRegistryHost] = framework.RegistryAuth{ | ||
| Auth: redhatRegistryAuthString, | ||
| Email: redhatRegistryEmail, | ||
| } | ||
|
|
||
| // Marshal back to JSON | ||
| updatedDockerConfigJSON, err := json.Marshal(currentConfig) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| // Update the secret | ||
| currentSecret.Data[corev1.DockerConfigJsonKey] = updatedDockerConfigJSON | ||
| _, err = kubeClient.CoreV1().Secrets(pullSecretNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("waiting for HCCO to merge the updated pull secret (with registry.redhat.io) into global pull secret") | ||
| Eventually(func() error { | ||
| return verifiers.VerifyPullSecretMergedIntoGlobal(redhatRegistryHost).Verify(ctx, adminRESTConfig) | ||
| }, 300*time.Second, 15*time.Second).Should(Succeed(), "registry.redhat.io pull secret should be merged into global-pull-secret by HCCO") | ||
|
|
||
| By("verifying both test registries are now in the global pull secret") | ||
| err = verifiers.VerifyPullSecretMergedIntoGlobal(testPullSecretHost).Verify(ctx, adminRESTConfig) | ||
| Expect(err).NotTo(HaveOccurred(), "host.example.com should still be in global-pull-secret") | ||
|
|
||
| err = verifiers.VerifyPullSecretAuthData( | ||
| "global-pull-secret", | ||
| pullSecretNamespace, | ||
| redhatRegistryHost, | ||
| redhatRegistryAuthString, | ||
| redhatRegistryEmail, | ||
| ).Verify(ctx, adminRESTConfig) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("creating dynamic client for operator installation") | ||
| dynamicClient, err := dynamic.NewForConfig(adminRESTConfig) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("creating namespace for NFD operator") | ||
| const nfdNamespace = "openshift-nfd" | ||
| _, err = kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: nfdNamespace, | ||
| }, | ||
| }, metav1.CreateOptions{}) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("creating OperatorGroup for NFD operator") | ||
| operatorGroupGVR := schema.GroupVersionResource{ | ||
| Group: "operators.coreos.com", | ||
| Version: "v1", | ||
| Resource: "operatorgroups", | ||
| } | ||
| operatorGroup := &unstructured.Unstructured{ | ||
| Object: map[string]interface{}{ | ||
| "apiVersion": "operators.coreos.com/v1", | ||
| "kind": "OperatorGroup", | ||
| "metadata": map[string]interface{}{ | ||
| "name": "nfd-operator-group", | ||
| "namespace": nfdNamespace, | ||
| }, | ||
| "spec": map[string]interface{}{ | ||
| "targetNamespaces": []interface{}{nfdNamespace}, | ||
| }, | ||
| }, | ||
| } | ||
| _, err = dynamicClient.Resource(operatorGroupGVR).Namespace(nfdNamespace).Create(ctx, operatorGroup, metav1.CreateOptions{}) | ||
| Expect(err).NotTo(HaveOccurred()) | ||
|
|
||
| By("creating Subscription for NFD operator from redhat-operators catalog") | ||
| subscriptionGVR := schema.GroupVersionResource{ | ||
| Group: "operators.coreos.com", | ||
| Version: "v1alpha1", | ||
| Resource: "subscriptions", | ||
| } | ||
| subscription := &unstructured.Unstructured{ | ||
| Object: map[string]interface{}{ | ||
| "apiVersion": "operators.coreos.com/v1alpha1", | ||
| "kind": "Subscription", | ||
| "metadata": map[string]interface{}{ | ||
| "name": "nfd", | ||
| "namespace": nfdNamespace, | ||
| }, | ||
| "spec": map[string]interface{}{ | ||
| "channel": "stable", | ||
| "name": "nfd", | ||
| "source": "redhat-operators", | ||
| "sourceNamespace": "openshift-marketplace", | ||
| "installPlanApproval": "Automatic", | ||
Comment on lines +295 to +300 (Member): we're relying on an external source here and assuming it never gets removed. Is there a better image to use, i.e. just create a pod and ensure the image pulls successfully, rather than leveraging the marketplace images? (A sketch of that alternative appears after the diff.)
					},
				},
			}
			_, err = dynamicClient.Resource(subscriptionGVR).Namespace(nfdNamespace).Create(ctx, subscription, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())

			By("waiting for NFD operator to be installed")
			Eventually(func() error {
				return verifiers.VerifyOperatorInstalled(nfdNamespace, "nfd").Verify(ctx, adminRESTConfig)
			}, 300*time.Second, 15*time.Second).Should(Succeed(), "NFD operator should be installed successfully")

			By("creating NodeFeatureDiscovery CR to deploy NFD worker")
Comment (Member): This feels like we're testing NFD, which doesn't seem relevant to the "global pull secret" test itself.
			nfdGVR := schema.GroupVersionResource{
				Group:    "nfd.openshift.io",
				Version:  "v1",
				Resource: "nodefeaturediscoveries",
			}
			nfdCR := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "nfd.openshift.io/v1",
					"kind":       "NodeFeatureDiscovery",
					"metadata": map[string]interface{}{
						"name":      "nfd-instance",
						"namespace": nfdNamespace,
					},
					"spec": map[string]interface{}{
						"operand": map[string]interface{}{
							"image": "registry.redhat.io/openshift4/ose-node-feature-discovery:latest",
						},
					},
				},
			}
			_, err = dynamicClient.Resource(nfdGVR).Namespace(nfdNamespace).Create(ctx, nfdCR, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())

			By("waiting for NFD worker DaemonSet to be created")
			Eventually(func() error {
				daemonSets, err := kubeClient.AppsV1().DaemonSets(nfdNamespace).List(ctx, metav1.ListOptions{})
				if err != nil {
					return err
				}
				for _, ds := range daemonSets.Items {
					if ds.Name == "nfd-worker" {
						if ds.Status.DesiredNumberScheduled > 0 && ds.Status.NumberReady > 0 {
							return nil
						}
						return fmt.Errorf("nfd-worker DaemonSet found but not ready: desired=%d, ready=%d",
							ds.Status.DesiredNumberScheduled, ds.Status.NumberReady)
					}
				}
				return fmt.Errorf("nfd-worker DaemonSet not found")
			}, 300*time.Second, 15*time.Second).Should(Succeed(), "NFD worker DaemonSet should be created and have ready pods")

			By("waiting for NFD worker pods to be created and verify images from registry.redhat.io can be pulled")
			Eventually(func() error {
				return verifiers.VerifyImagePulled(nfdNamespace, "registry.redhat.io", "ose-node-feature-discovery").Verify(ctx, adminRESTConfig)
			}, 300*time.Second, 15*time.Second).Should(Succeed(), "NFD worker images from registry.redhat.io should be pulled successfully with the added pull secret")
		})
})
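Regarding the inline review comment above about relying on marketplace content: a minimal sketch of the suggested pod-based pull check is shown below. It is not part of this PR; the pod name, namespace, and image reference are illustrative assumptions, it reuses the test's existing kubeClient, and it relies on the k8s.io/apimachinery/pkg/util/wait helpers.

// Sketch of the reviewer-suggested alternative: instead of installing the NFD
// operator from the marketplace, create a single pod whose image comes from
// registry.redhat.io and confirm the kubelet can pull it with the merged
// credentials. Pod name and image are hypothetical placeholders.
func verifyRedHatRegistryPullWithPod(ctx context.Context, kubeClient kubernetes.Interface) error {
	const (
		ns      = "default"                                    // assumes this namespace exists
		podName = "registry-pull-check"                        // hypothetical pod name
		image   = "registry.redhat.io/ubi9/ubi-minimal:latest" // assumed pullable with the added credentials
	)
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: ns},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:    "pull-check",
				Image:   image,
				Command: []string{"sleep", "60"},
			}},
		},
	}
	if _, err := kubeClient.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		return err
	}
	// Poll the pod until its container has started (image pulled) or the
	// kubelet reports an image pull failure.
	return wait.PollUntilContextTimeout(ctx, 10*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			p, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				return false, nil // tolerate transient API errors and keep polling
			}
			for _, cs := range p.Status.ContainerStatuses {
				if cs.State.Running != nil || cs.State.Terminated != nil {
					return true, nil // image was pulled and the container ran
				}
				if w := cs.State.Waiting; w != nil && (w.Reason == "ErrImagePull" || w.Reason == "ImagePullBackOff") {
					return false, fmt.Errorf("image pull failed: %s", w.Message)
				}
			}
			return false, nil
		})
}

If that direction were taken, the test could call this helper right after the registry.redhat.io credentials are merged, in place of the OperatorGroup, Subscription, and NodeFeatureDiscovery steps.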
Comment: It would be good to track status changes between iterations of the verifier. Is there also any other use case for this verifier outside of this test? Why not inline it?

Reply: Update as I'm reviewing this: I can see the reuse within the test itself. I'm fine leaving it in verifiers, but most likely no one else will use it.

Reply: The idea is that any future test that needs to add pull secrets can use the verifier to make things easier.
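On the suggestion to track status changes between verifier iterations, one possible shape is sketched below. It assumes the verifier's Verify method takes a context and the admin *rest.Config, matching how it is called in the test above; the helper name is hypothetical and is not part of this PR.

// Sketch: wrap a verifier so Eventually logs only when its reported status
// changes between polling iterations. Requires "k8s.io/client-go/rest" and
// the ginkgo/v2 dot import already used by the test (for GinkgoLogr).
func pollWithChangeLog(ctx context.Context, cfg *rest.Config,
	verify func(context.Context, *rest.Config) error) func() error {
	var last string
	return func() error {
		err := verify(ctx, cfg)
		current := ""
		if err != nil {
			current = err.Error()
		}
		if current != last {
			GinkgoLogr.Info("verifier status changed", "previous", last, "current", current)
			last = current
		}
		return err
	}
}

A future test that adds its own pull secret could then reuse the same verifier with something like Eventually(pollWithChangeLog(ctx, adminRESTConfig, verifiers.VerifyPullSecretMergedIntoGlobal(myHost).Verify), 300*time.Second, 15*time.Second).Should(Succeed()), where myHost is that test's registry host.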