[CNS-UnRegisterVolume-API]: Automation tests for CNS-UnRegisterVolume API Feature
rpanduranga committed Sep 5, 2024
1 parent e03934b commit 1c13c2b
Showing 2 changed files with 363 additions and 0 deletions.
290 changes: 290 additions & 0 deletions tests/e2e/cns_unregister_volume_api.go
@@ -0,0 +1,290 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"
"os"
"strconv"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
admissionapi "k8s.io/pod-security-admission/api"
)

var _ = ginkgo.Describe("[csi-guest] [csi-supervisor] CNS Unregister Volume", func() {
f := framework.NewDefaultFramework("cns-unregister-volume")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
const defaultVolumeOpsScale = 30
const defaultVolumeOpsScaleWCP = 29
var (
client clientset.Interface
c clientset.Interface
fullSyncWaitTime int
namespace string
scParameters map[string]string
storagePolicyName string
volumeOpsScale int
isServiceStopped bool
serviceName string
csiReplicaCount int32
deployment *appsv1.Deployment
)

ginkgo.BeforeEach(func() {
bootstrap()
client = f.ClientSet
namespace = getNamespaceToRunTests(f)
scParameters = make(map[string]string)
isServiceStopped = false
storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")

if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}

if guestCluster {
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, rqLimit)
}

if os.Getenv("VOLUME_OPS_SCALE") != "" {
volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
if vanillaCluster {
volumeOpsScale = defaultVolumeOpsScale
} else {
volumeOpsScale = defaultVolumeOpsScaleWCP
}
}
framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)

if os.Getenv(envFullSyncWaitTime) != "" {
fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// The full sync interval is at minimum 1 min, so the full sync wait time must be at least 120s.
if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime {
framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime)
}
} else {
fullSyncWaitTime = defaultFullSyncWaitTime
}

// Get CSI Controller's replica count from the setup
controllerClusterConfig := os.Getenv(contollerClusterKubeConfig)
c = client
if controllerClusterConfig != "" {
framework.Logf("Creating client for remote kubeconfig")
remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
c = remoteC
}
deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx,
vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
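// Record the replica count so AfterEach can scale the CSI controller deployment back up after service-stop tests.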
csiReplicaCount = *deployment.Spec.Replicas
})

ginkgo.AfterEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if isServiceStopped {
if serviceName == "CSI" {
framework.Logf("Starting CSI driver")
ignoreLabels := make(map[string]string)
err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix,
csiSystemNamespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// Wait for the CSI Pods to be up and Running
listOfPods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
numCsiPods := len(listOfPods)
err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(numCsiPods), 0,
pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else if serviceName == hostdServiceName {
framework.Logf("In afterEach function to start the hostd service on all hosts")
hostIPs := getAllHostsIP(ctx, true)
for _, hostIP := range hostIPs {
startHostDOnHost(ctx, hostIP)
}
} else {
vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}

ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec))
updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec)

if supervisorCluster {
deleteResourceQuota(client, namespace)
dumpSvcNsEventsOnTestFailure(client, namespace)
}
if guestCluster {
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, defaultrqLimit)
dumpSvcNsEventsOnTestFailure(svcClient, svNamespace)
}
})

ginkgo.It("export detached volume", func() {
serviceName = vsanhealthServiceName
exportDetachedVolume(namespace, client, storagePolicyName, scParameters,
volumeOpsScale, true)
})
})

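// exportDetachedVolume provisions volumeOpsScale PVCs from a thick-provision
// storage policy, waits for them to bind, creates a CnsUnregisterVolume CR for
// each backing FCD, verifies the volumes are removed from CNS, and finally
// deletes the FCDs from the default datastore.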
func exportDetachedVolume(namespace string, client clientset.Interface,
storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var storageclass *storagev1.StorageClass
var persistentvolumes []*v1.PersistentVolume
var pvclaims []*v1.PersistentVolumeClaim
var err error
pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)

// Get a config to talk to the apiserver
restConfig := getRestConfigClient()

framework.Logf("storagePolicyName %v", storagePolicyName)
framework.Logf("extendVolume %v", extendVolume)

if supervisorCluster {
ginkgo.By("CNS_TEST: Running for WCP setup")
thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
if thickProvPolicy == "" {
ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
}
profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy)
scParameters[scParamStoragePolicyID] = profileID
// create resource quota
createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
ginkgo.By("CNS_TEST: Running for GC setup")
thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
if thickProvPolicy == "" {
ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
}
createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
scParameters[svStorageClassName] = thickProvPolicy
scParameters[scParamFsType] = ext4FSType
storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Failf("StorageClass %s not found in the guest cluster", thickProvPolicy)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var allowExpansion = true
storageclass.AllowVolumeExpansion = &allowExpansion
storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

ginkgo.By("Creating PVCs using the Storage Class")
framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)
for i := 0; i < volumeOpsScale; i++ {
framework.Logf("Creating pvc%v", i)
pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace,
getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, ""))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

ginkgo.By("Waiting for all claims to be in bound state")
persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims,
2*framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// TODO: Add logic to check for orphan volumes.
defer func() {
for _, claim := range pvclaims {
err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
ginkgo.By("Verify PVs, volumes are deleted from CNS")
for _, pv := range persistentvolumes {
err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
framework.PodDeleteTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volumeID := pv.Spec.CSI.VolumeHandle
err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Volume: %s should not be present in the "+
"CNS after it is deleted from kubernetes", volumeID))
}
}()
for _, pv := range persistentvolumes {
volumeID := pv.Spec.CSI.VolumeHandle
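// Brief settle interval before unregistering; presumably gives CNS time to
// sync metadata for the newly bound volume (assumed, not required by the API).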
time.Sleep(30 * time.Second)

ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle)
cnsUnRegisterVolume := getCNSUnRegisterVolumeSpec(ctx, namespace, volumeID)
err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetCreated(ctx,
restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
framework.Logf("CNS unregister volume name : %s", cnsUnRegisterVolumeName)
}

ginkgo.By("Verify PVs, volumes are deleted from CNS")
for _, pv := range persistentvolumes {
err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
framework.PodDeleteTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volumeID := pv.Spec.CSI.VolumeHandle
err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Volume: %s should not be present in the "+
"CNS after it is deleted from kubernetes", volumeID))
}

defaultDatastore = getDefaultDatastore(ctx)
ginkgo.By(fmt.Sprintf("defaultDatastore %v sec", defaultDatastore))

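// Clean up the now-unregistered FCDs from the datastore; the helper presumably
// retries on transient disk-lib unlink errors and ignores "object not found"
// errors for FCDs that are already gone.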
for _, pv1 := range persistentvolumes {
ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle))
err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(),
[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
73 changes: 73 additions & 0 deletions tests/e2e/util.go
@@ -88,6 +88,7 @@ import (
cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1"
cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1"
cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1"
cnsunregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsunregistervolume/v1alpha1"
cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1"
storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2"
k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes"
@@ -3243,6 +3244,31 @@ func getCNSRegisterVolumeSpec(ctx context.Context, namespace string, fcdID strin
return cnsRegisterVolume
}

// getCNSUnRegisterVolumeSpec creates a CnsUnregisterVolume spec for the given FCD ID.
func getCNSUnRegisterVolumeSpec(ctx context.Context, namespace string,
fcdID string) *cnsunregistervolumev1alpha1.CnsUnregisterVolume {
framework.Logf("Creating CnsUnregisterVolume spec")
return &cnsunregistervolumev1alpha1.CnsUnregisterVolume{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cnsunregvol-",
Namespace: namespace,
},
Spec: cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{
VolumeID: fcdID,
},
}
}
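// Illustrative call site (a sketch; assumes restConfig and namespace are
// initialized as in the e2e test above; these names come from this change,
// not a public API):
//
//	spec := getCNSUnRegisterVolumeSpec(ctx, namespace, fcdID)
//	err := createCNSUnRegisterVolume(ctx, restConfig, spec)
//	framework.ExpectNoError(err)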

// Create CNS register volume.
func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config,
cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) error {
@@ -3255,6 +3281,18 @@ func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config,
return err
}

// createCNSUnRegisterVolume creates the given CnsUnregisterVolume CR using the CNS operator client.
func createCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config,
cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error {

cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Create CNSUnRegisterVolume")
err = cnsOperatorClient.Create(ctx, cnsUnRegisterVolume)

return err
}

// Query CNS Register volume. Returns true if the CNSRegisterVolume is
// available otherwise false.
func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config,
@@ -3329,6 +3367,21 @@ func getCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config,
return cns
}

// getCNSUnRegistervolume fetches the latest CnsUnregisterVolume CR from the API server.
func getCNSUnRegistervolume(ctx context.Context,
restClientConfig *rest.Config, cnsUnRegisterVolume *cnsunregistervolumev1alpha1.
CnsUnregisterVolume) *cnsunregistervolumev1alpha1.CnsUnregisterVolume {
cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{}
err = cnsOperatorClient.Get(ctx,
pkgtypes.NamespacedName{Name: cnsUnRegisterVolume.Name, Namespace: cnsUnRegisterVolume.Namespace}, cns)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

return cns
}

// Update CNS register volume.
func updateCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config,
cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) *cnsregistervolumev1alpha1.CnsRegisterVolume {
@@ -4051,6 +4104,26 @@ func waitForCNSRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest.
return fmt.Errorf("cnsRegisterVolume %s creation is failed within %v", cnsRegisterVolumeName, timeout)
}

// waitForCNSUnRegisterVolumeToGetCreated waits until the given CnsUnregisterVolume
// reports Status.Unregistered, or until timeout occurs, whichever comes first.
func waitForCNSUnRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest.Config, namespace string,
cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume, Poll, timeout time.Duration) error {
cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
framework.Logf("Waiting up to %v for CnsUnregisterVolume %s to complete", timeout, cnsUnRegisterVolumeName)

for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
cnsUnRegisterVolume = getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume)
if cnsUnRegisterVolume.Status.Unregistered {
return nil
}
}

return fmt.Errorf("cnsUnregisterVolume %s was not unregistered within %v", cnsUnRegisterVolumeName, timeout)
}

// waitForCNSRegisterVolumeToGetDeleted waits for a cnsRegisterVolume to get
// deleted or until timeout occurs, whichever comes first.
func waitForCNSRegisterVolumeToGetDeleted(ctx context.Context, restConfig *rest.Config, namespace string,
