Commit
e2e: refactor retain test
Signed-off-by: changzhen <[email protected]>
XiShanYongYe-Chang committed Nov 25, 2022
1 parent 149e0fb commit f4353a1
Showing 2 changed files with 56 additions and 66 deletions.
32 changes: 18 additions & 14 deletions test/e2e/framework/workload.go
@@ -2,7 +2,6 @@ package framework

import (
"context"
"encoding/json"
"fmt"

"github.com/onsi/ginkgo/v2"
@@ -20,7 +19,7 @@ import (

var workloadGVR = workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")

- // CreateWorkload create Workload with dynamic client
+ // CreateWorkload creates Workload with dynamic client
func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload) {
ginkgo.By(fmt.Sprintf("Creating workload(%s/%s)", workload.Namespace, workload.Name), func() {
unstructuredObj, err := helper.ToUnstructured(workload)
@@ -31,7 +30,7 @@ func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
})
}

- // UpdateWorkload update Workload with dynamic client
+ // UpdateWorkload updates Workload with dynamic client
func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string, subresources ...string) {
ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
newUnstructuredObj, err := helper.ToUnstructured(workload)
@@ -44,38 +43,34 @@ func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
})
}

- // GetWorkload get Workload with dynamic client.
+ // GetWorkload gets Workload with dynamic client.
func GetWorkload(client dynamic.Interface, namespace, name string) *workloadv1alpha1.Workload {
- workload := workloadv1alpha1.Workload{}
+ workload := &workloadv1alpha1.Workload{}

ginkgo.By(fmt.Sprintf("Get workload(%s/%s)", namespace, name), func() {
var err error
unstructuredObj := &unstructured.Unstructured{}

gomega.Eventually(func() error {
unstructuredObj, err = client.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())

- result, err := unstructuredObj.MarshalJSON()
- gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
- err = json.Unmarshal(result, &workload)
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), workload)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})

- return &workload
+ return workload
}

- // RemoveWorkload delete Workload with dynamic client.
+ // RemoveWorkload deletes Workload with dynamic client.
func RemoveWorkload(client dynamic.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Remove workload(%s/%s)", namespace, name), func() {
err := client.Resource(workloadGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

- // WaitWorkloadPresentOnClusterFitWith wait workload present on member clusters sync with fit func.
+ // WaitWorkloadPresentOnClusterFitWith waits workload present on member cluster sync with fit func.
func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
clusterClient := GetClusterDynamicClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
@@ -93,7 +88,16 @@ func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit fu
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}

- // WaitWorkloadDisappearOnCluster wait workload disappear on cluster until timeout.
+ // WaitWorkloadPresentOnClustersFitWith waits workload present on member clusters sync with fit func.
+ func WaitWorkloadPresentOnClustersFitWith(clusters []string, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
+ ginkgo.By(fmt.Sprintf("Waiting for workload(%s/%s) synced on member clusters fit with func", namespace, name), func() {
+ for _, clusterName := range clusters {
+ WaitWorkloadPresentOnClusterFitWith(clusterName, namespace, name, fit)
+ }
+ })
+ }
+
+ // WaitWorkloadDisappearOnCluster waits workload disappear on cluster until timeout.
func WaitWorkloadDisappearOnCluster(cluster, namespace, name string) {
clusterClient := GetClusterDynamicClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
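
Worth noting in GetWorkload: the JSON round trip (MarshalJSON, then json.Unmarshal) is replaced by runtime.DefaultUnstructuredConverter.FromUnstructured, which decodes the unstructured content directly into the typed object with no intermediate []byte. A minimal sketch of the two approaches, using a toy Workload struct in place of the karmada type:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// Toy stand-in for workloadv1alpha1.Workload; illustrative only.
type Workload struct {
	Spec struct {
		Paused bool `json:"paused"`
	} `json:"spec"`
}

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"spec": map[string]interface{}{"paused": true},
	}}

	// Old approach: serialize to JSON, then unmarshal into the typed object.
	viaJSON := &Workload{}
	raw, err := u.MarshalJSON()
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(raw, viaJSON); err != nil {
		panic(err)
	}

	// New approach: convert the unstructured map directly.
	viaConverter := &Workload{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), viaConverter); err != nil {
		panic(err)
	}

	fmt.Println(viaJSON.Spec.Paused, viaConverter.Spec.Paused) // true true
}
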
90 changes: 38 additions & 52 deletions test/e2e/resourceinterpreter_test.go
@@ -22,6 +22,7 @@ import (
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
testhelper "github.com/karmada-io/karmada/test/helper"
@@ -81,41 +82,37 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
})

ginkgo.Context("InterpreterOperation Retain testing", func() {
- var waitTime time.Duration
- var updatedPaused bool
-
ginkgo.BeforeEach(func() {
- waitTime = 5 * time.Second
- updatedPaused = true
-
policy.Spec.Placement.ClusterAffinity.ClusterNames = framework.ClusterNames()
})

ginkgo.It("Retain testing", func() {
ginkgo.By("update workload's spec.paused to true", func() {
for _, cluster := range framework.ClusterNames() {
clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())

memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
memberWorkload.Spec.Paused = updatedPaused
framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster)
ginkgo.By("wait workload exist on the member clusters", func() {
for _, clusterName := range framework.ClusterNames() {
framework.WaitWorkloadPresentOnClusterFitWith(clusterName, workload.Namespace, workload.Name,
func(_ *workloadv1alpha1.Workload) bool {
return true
})
}
})

- // Wait executeController to reconcile then check if it is retained
- time.Sleep(waitTime)
- ginkgo.By("check if workload's spec.paused is retained", func() {
- for _, cluster := range framework.ClusterNames() {
- clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
- gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
+ ginkgo.By("update workload on the control plane", func() {
+ gomega.Eventually(func(g gomega.Gomega) error {
+ curWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName)
+ // construct two values that need to be changed, and only one value is retained.
+ curWorkload.Spec.Replicas = pointer.Int32Ptr(2)
+ curWorkload.Spec.Paused = true

- gomega.Eventually(func(g gomega.Gomega) (bool, error) {
- memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
+ newUnstructuredObj, err := helper.ToUnstructured(curWorkload)
+ g.Expect(err).ShouldNot(gomega.HaveOccurred())

- return memberWorkload.Spec.Paused, nil
- }, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
- }
+ workloadGVR := workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
+ _, err = dynamicClient.Resource(workloadGVR).Namespace(curWorkload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{})
+ return err
+ }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})

+ ginkgo.By("check if workload's spec.paused is retained", func() {
+ framework.WaitWorkloadPresentOnClustersFitWith(framework.ClusterNames(), workload.Namespace, workload.Name,
+ func(workload *workloadv1alpha1.Workload) bool {
+ return *workload.Spec.Replicas == 2 && !workload.Spec.Paused
+ })
+ })
})
})
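
The new assertion encodes the Retain semantics under test: spec.replicas, updated on the control plane, should propagate to members, while spec.paused should keep the value already observed on the member cluster. A conceptual sketch of that merge with a toy spec (retain below is illustrative, not the interpreter webhook's actual code):

package main

import "fmt"

// Toy spec mirroring the two fields the test touches; illustrative only.
type WorkloadSpec struct {
	Replicas int32
	Paused   bool
}

// retain mimics the effect the test verifies: the desired (control-plane)
// object wins, except spec.paused, which keeps the value already observed
// on the member cluster.
func retain(desired, observed WorkloadSpec) WorkloadSpec {
	result := desired
	result.Paused = observed.Paused
	return result
}

func main() {
	desired := WorkloadSpec{Replicas: 2, Paused: true}   // control plane after the update
	observed := WorkloadSpec{Replicas: 1, Paused: false} // member cluster before the sync
	fmt.Printf("%+v\n", retain(desired, observed))       // {Replicas:2 Paused:false}
}
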
@@ -424,12 +421,7 @@ end`,
})

ginkgo.Context("InterpreterOperation Retain testing", func() {
- var waitTime time.Duration
- var updatedPaused bool
-
ginkgo.BeforeEach(func() {
- waitTime = 5 * time.Second
- updatedPaused = true
customization = testhelper.NewResourceInterpreterCustomization(
"interpreter-customization"+rand.String(RandomStrLength),
configv1alpha1.CustomizationTarget{
@@ -448,30 +440,24 @@ end`,
})

ginkgo.It("Retain testing", func() {
ginkgo.By("update deployment's spec.paused to true", func() {
clusterClient := framework.GetClusterClient(targetCluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
var memberDeploy *appsv1.Deployment
ginkgo.By("wait deployment exist on the member clusters", func() {
framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
memberDeploy = deployment
func(_ *appsv1.Deployment) bool {
return true
})
framework.UpdateDeploymentPaused(clusterClient, memberDeploy, updatedPaused)
})

- // Wait executeController to reconcile then check if it is retained
- time.Sleep(waitTime)
+ ginkgo.By("update deployment on the control plane", func() {
+ // construct two values that need to be changed, and only one value is retained.
+ framework.UpdateDeploymentPaused(kubeClient, deployment, true)
+ framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
+ })

ginkgo.By("check if deployment's spec.paused is retained", func() {
- gomega.Eventually(func(g gomega.Gomega) bool {
- var memberDeployment *appsv1.Deployment
- framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
- func(deployment *appsv1.Deployment) bool {
- memberDeployment = deployment
- return true
- })
- return memberDeployment.Spec.Paused
- }, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
+ framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
+ func(deployment *appsv1.Deployment) bool {
+ return *deployment.Spec.Replicas == 2 && !deployment.Spec.Paused
+ })
})
})
})
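
Both tests drop the same pattern: a fixed time.Sleep(waitTime) followed by a one-shot assertion, replaced by polling until the member cluster converges. A dependency-free sketch of that wait-until shape (waitUntil is a hypothetical stand-in for the suite's gomega.Eventually calls):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls cond every interval until it returns true or the timeout
// expires — instead of sleeping a fixed duration and asserting once.
func waitUntil(timeout, interval time.Duration, cond func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before timeout")
}

func main() {
	start := time.Now()
	// Stand-in for "the controller has reconciled the member workload":
	// here the condition simply becomes true after ~500ms.
	err := waitUntil(3*time.Second, 100*time.Millisecond, func() bool {
		return time.Since(start) > 500*time.Millisecond
	})
	fmt.Println(err) // <nil>
}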
