Skip to content

Commit 739d2c3

Browse files
authored
Merge pull request #2939 from Poor12/automated-cherry-pick-of-#2930-upstream-release-1.2
Automated cherry pick of #2930: fix work status not being synced to the control plane
2 parents fbddb6a + 882f9e4 commit 739d2c3

File tree

5 files changed

+135
-23
lines changed

5 files changed

+135
-23
lines changed

pkg/controllers/status/cluster_status_controller.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,6 @@ func (c *ClusterStatusController) syncClusterStatus(cluster *clusterv1alpha1.Clu
142142
if !online && readyCondition.Status != metav1.ConditionTrue {
143143
klog.V(2).Infof("Cluster(%s) still offline after %s, ensuring offline is set.",
144144
cluster.Name, c.ClusterFailureThreshold.Duration)
145-
c.InformerManager.Stop(cluster.Name)
146145
setTransitionTime(cluster.Status.Conditions, readyCondition)
147146
meta.SetStatusCondition(&currentClusterStatus.Conditions, *readyCondition)
148147
return c.updateStatusIfNeeded(cluster, currentClusterStatus)

test/e2e/framework/deployment.go

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,9 +84,13 @@ func WaitDeploymentDisappearOnClusters(clusters []string, namespace, name string
8484
// UpdateDeploymentReplicas update deployment's replicas.
8585
func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.Deployment, replicas int32) {
8686
ginkgo.By(fmt.Sprintf("Updating Deployment(%s/%s)'s replicas to %d", deployment.Namespace, deployment.Name, replicas), func() {
87-
deployment.Spec.Replicas = &replicas
8887
gomega.Eventually(func() error {
89-
_, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
88+
deploy, err := client.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
89+
if err != nil {
90+
return err
91+
}
92+
deploy.Spec.Replicas = &replicas
93+
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
9094
return err
9195
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
9296
})

test/e2e/propagationpolicy_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
9090
serviceNamespace = policyNamespace
9191
serviceName = policyName
9292

93-
service = testhelper.NewService(serviceNamespace, serviceName)
93+
service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeClusterIP)
9494
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
9595
{
9696
APIVersion: service.APIVersion,

test/e2e/resource_test.go

Lines changed: 126 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,13 @@ import (
1818
"k8s.io/apimachinery/pkg/util/wait"
1919
"k8s.io/klog/v2"
2020

21+
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
2122
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
2223
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
24+
"github.com/karmada-io/karmada/pkg/util"
2325
"github.com/karmada-io/karmada/pkg/util/names"
2426
"github.com/karmada-io/karmada/test/e2e/framework"
25-
"github.com/karmada-io/karmada/test/helper"
27+
testhelper "github.com/karmada-io/karmada/test/helper"
2628
)
2729

2830
var _ = ginkgo.Describe("[resource-status collection] resource status collection testing", func() {
@@ -46,8 +48,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
4648
deploymentNamespace = testNamespace
4749
deploymentName = policyName
4850

49-
deployment = helper.NewDeployment(deploymentNamespace, deploymentName)
50-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
51+
deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName)
52+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
5153
{
5254
APIVersion: deployment.APIVersion,
5355
Kind: deployment.Kind,
@@ -123,9 +125,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
123125
serviceNamespace = testNamespace
124126
serviceName = policyName
125127

126-
service = helper.NewService(serviceNamespace, serviceName)
127-
service.Spec.Type = corev1.ServiceTypeLoadBalancer
128-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
128+
service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeLoadBalancer)
129+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
129130
{
130131
APIVersion: service.APIVersion,
131132
Kind: service.Kind,
@@ -196,9 +197,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
196197
serviceNamespace = testNamespace
197198
serviceName = policyName
198199

199-
service = helper.NewService(serviceNamespace, serviceName)
200-
service.Spec.Type = corev1.ServiceTypeNodePort
201-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
200+
service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeNodePort)
201+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
202202
{
203203
APIVersion: service.APIVersion,
204204
Kind: service.Kind,
@@ -266,8 +266,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
266266
ingNamespace = testNamespace
267267
ingName = policyName
268268

269-
ingress = helper.NewIngress(ingNamespace, ingName)
270-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
269+
ingress = testhelper.NewIngress(ingNamespace, ingName)
270+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
271271
{
272272
APIVersion: ingress.APIVersion,
273273
Kind: ingress.Kind,
@@ -338,8 +338,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
338338
jobNamespace = testNamespace
339339
jobName = policyName
340340

341-
job = helper.NewJob(jobNamespace, jobName)
342-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
341+
job = testhelper.NewJob(jobNamespace, jobName)
342+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
343343
{
344344
APIVersion: job.APIVersion,
345345
Kind: job.Kind,
@@ -392,8 +392,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
392392
daemonSetNamespace = testNamespace
393393
daemonSetName = policyName
394394

395-
daemonSet = helper.NewDaemonSet(daemonSetNamespace, daemonSetName)
396-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
395+
daemonSet = testhelper.NewDaemonSet(daemonSetNamespace, daemonSetName)
396+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
397397
{
398398
APIVersion: daemonSet.APIVersion,
399399
Kind: daemonSet.Kind,
@@ -479,8 +479,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
479479
statefulSetNamespace = testNamespace
480480
statefulSetName = policyName
481481

482-
statefulSet = helper.NewStatefulSet(statefulSetNamespace, statefulSetName)
483-
policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
482+
statefulSet = testhelper.NewStatefulSet(statefulSetNamespace, statefulSetName)
483+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
484484
{
485485
APIVersion: statefulSet.APIVersion,
486486
Kind: statefulSet.Kind,
@@ -545,3 +545,112 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
545545
})
546546
})
547547
})
548+
549+
var _ = framework.SerialDescribe("workload status synchronization testing", func() {
550+
ginkgo.Context("Deployment status synchronization when cluster failed and recovered soon", func() {
551+
var policyNamespace, policyName string
552+
var deploymentNamespace, deploymentName string
553+
var deployment *appsv1.Deployment
554+
var policy *policyv1alpha1.PropagationPolicy
555+
var originalReplicas, numOfFailedClusters int
556+
557+
ginkgo.BeforeEach(func() {
558+
policyNamespace = testNamespace
559+
policyName = deploymentNamePrefix + rand.String(RandomStrLength)
560+
deploymentNamespace = testNamespace
561+
deploymentName = policyName
562+
deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName)
563+
numOfFailedClusters = 1
564+
originalReplicas = 3
565+
566+
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
567+
{
568+
APIVersion: deployment.APIVersion,
569+
Kind: deployment.Kind,
570+
Name: deployment.Name,
571+
},
572+
}, policyv1alpha1.Placement{
573+
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
574+
LabelSelector: &metav1.LabelSelector{
575+
// only test push mode clusters
576+
// because pull mode clusters cannot be disabled by changing APIEndpoint
577+
MatchLabels: pushModeClusterLabels,
578+
},
579+
},
580+
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
581+
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
582+
},
583+
})
584+
})
585+
586+
ginkgo.BeforeEach(func() {
587+
framework.CreatePropagationPolicy(karmadaClient, policy)
588+
framework.CreateDeployment(kubeClient, deployment)
589+
ginkgo.DeferCleanup(func() {
590+
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
591+
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
592+
})
593+
})
594+
595+
ginkgo.It("deployment status synchronization testing", func() {
596+
var disabledClusters []string
597+
targetClusterNames := framework.ExtractTargetClustersFrom(controlPlaneClient, deployment)
598+
599+
ginkgo.By("set one cluster condition status to false", func() {
600+
temp := numOfFailedClusters
601+
for _, targetClusterName := range targetClusterNames {
602+
if temp > 0 {
603+
klog.Infof("Set cluster %s to disable.", targetClusterName)
604+
err := disableCluster(controlPlaneClient, targetClusterName)
605+
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
606+
607+
// wait for the current cluster status changing to false
608+
framework.WaitClusterFitWith(controlPlaneClient, targetClusterName, func(cluster *clusterv1alpha1.Cluster) bool {
609+
return !util.IsClusterReady(&cluster.Status)
610+
})
611+
disabledClusters = append(disabledClusters, targetClusterName)
612+
temp--
613+
}
614+
}
615+
})
616+
617+
ginkgo.By("recover not ready cluster", func() {
618+
for _, disabledCluster := range disabledClusters {
619+
fmt.Printf("cluster %s is waiting for recovering\n", disabledCluster)
620+
originalAPIEndpoint := getClusterAPIEndpoint(disabledCluster)
621+
622+
err := recoverCluster(controlPlaneClient, disabledCluster, originalAPIEndpoint)
623+
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
624+
// wait for the disabled cluster recovered
625+
gomega.Eventually(func(g gomega.Gomega) (bool, error) {
626+
currentCluster, err := util.GetCluster(controlPlaneClient, disabledCluster)
627+
g.Expect(err).ShouldNot(gomega.HaveOccurred())
628+
629+
if util.IsClusterReady(&currentCluster.Status) {
630+
fmt.Printf("cluster %s recovered\n", disabledCluster)
631+
return true, nil
632+
}
633+
return false, nil
634+
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
635+
}
636+
})
637+
638+
ginkgo.By("edit deployment in disabled cluster", func() {
639+
for _, disabledCluster := range disabledClusters {
640+
clusterClient := framework.GetClusterClient(disabledCluster)
641+
framework.UpdateDeploymentReplicas(clusterClient, deployment, updateDeploymentReplicas)
642+
// wait for the status synchronization
643+
gomega.Eventually(func(g gomega.Gomega) (bool, error) {
644+
currentDeployment, err := clusterClient.AppsV1().Deployments(testNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
645+
g.Expect(err).ShouldNot(gomega.HaveOccurred())
646+
647+
if *currentDeployment.Spec.Replicas == int32(originalReplicas) {
648+
return true, nil
649+
}
650+
return false, nil
651+
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
652+
}
653+
})
654+
})
655+
})
656+
})

test/helper/resource.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ func NewStatefulSet(namespace string, name string) *appsv1.StatefulSet {
128128
}
129129

130130
// NewService will build a service object.
131-
func NewService(namespace string, name string) *corev1.Service {
131+
func NewService(namespace string, name string, svcType corev1.ServiceType) *corev1.Service {
132132
return &corev1.Service{
133133
TypeMeta: metav1.TypeMeta{
134134
APIVersion: "v1",
@@ -139,6 +139,7 @@ func NewService(namespace string, name string) *corev1.Service {
139139
Name: name,
140140
},
141141
Spec: corev1.ServiceSpec{
142+
Type: svcType,
142143
Ports: []corev1.ServicePort{
143144
{
144145
Name: "http",
@@ -147,7 +148,6 @@ func NewService(namespace string, name string) *corev1.Service {
147148
TargetPort: intstr.IntOrString{IntVal: 8080},
148149
},
149150
},
150-
Type: corev1.ServiceTypeClusterIP,
151151
},
152152
}
153153
}

0 commit comments

Comments (0)