From 72f2701028f2d576dba230b57aee3ba29487ce6e Mon Sep 17 00:00:00 2001 From: Penghao Date: Wed, 7 Jan 2026 09:29:17 +0800 Subject: [PATCH] test: add localvolume fsTypes tests Signed-off-by: Penghao --- test/e2e/aws_disks.go | 2 +- test/e2e/gomega_util.go | 2 +- test/e2e/localvolume_test.go | 467 +++++++++++++++++++++++------------ test/e2e/main_test.go | 5 +- 4 files changed, 318 insertions(+), 158 deletions(-) diff --git a/test/e2e/aws_disks.go b/test/e2e/aws_disks.go index 2ddb919a3..c997f5a3c 100644 --- a/test/e2e/aws_disks.go +++ b/test/e2e/aws_disks.go @@ -164,7 +164,7 @@ func createAndAttachAWSVolumesForNode(t *testing.T, nodeEntry nodeDisks, ec2Clie createInput := &ec2.CreateVolumeInput{ AvailabilityZone: aws.String(zone), Size: aws.Int32(int32(diskSize)), - VolumeType: "gp2", + VolumeType: "gp3", TagSpecifications: []ec2types.TagSpecification{ { ResourceType: "volume", diff --git a/test/e2e/gomega_util.go b/test/e2e/gomega_util.go index f60e1abf4..158c7dedc 100644 --- a/test/e2e/gomega_util.go +++ b/test/e2e/gomega_util.go @@ -88,7 +88,7 @@ func eventuallyFindPVs(t *testing.T, f *framework.Framework, storageClassName st matcher := gomega.NewWithT(t) matcher.Eventually(func() []corev1.PersistentVolume { pvList := &corev1.PersistentVolumeList{} - t.Log(fmt.Sprintf("waiting for %d PVs to be created with StorageClass: %q", expectedPVs, storageClassName)) + t.Logf("waiting for %d PVs to be created with StorageClass: %q", expectedPVs, storageClassName) matcher.Eventually(func() error { return f.Client.List(context.TODO(), pvList) }).ShouldNot(gomega.HaveOccurred()) diff --git a/test/e2e/localvolume_test.go b/test/e2e/localvolume_test.go index 637e32fca..f01ff3f93 100644 --- a/test/e2e/localvolume_test.go +++ b/test/e2e/localvolume_test.go @@ -61,11 +61,14 @@ func LocalVolumeTest(ctx *framework.Context, cleanupFuncs *[]cleanupFn) func(*te } // represents the disk layout to setup on the nodes. 
+ // Extended to support testing different FSTypes: default, ext4, xfs, block
 nodeEnv := []nodeDisks{
 {
 disks: []disk{
- {size: 10},
- {size: 20},
+ {size: 10}, // default test disk
+ {size: 20}, // ext4 test disk
+ {size: 30}, // xfs test disk
+ {size: 40}, // block test disk
 },
 node: nodeList.Items[0],
 },
@@ -108,127 +111,251 @@ func LocalVolumeTest(ctx *framework.Context, cleanupFuncs *[]cleanupFn) func(*te
 // get the device paths and IDs
 nodeEnv = populateDeviceInfo(t, ctx, nodeEnv)
- selectedDisk := nodeEnv[0].disks[0]
- matcher.Expect(selectedDisk.path).ShouldNot(gomega.BeZero(), "device path should not be empty")
-
- localVolume := getFakeLocalVolume(selectedNode, selectedDisk.path, namespace)
-
- matcher.Eventually(func() error {
- t.Log("creating localvolume")
- return f.Client.Create(goctx.TODO(), localVolume, &framework.CleanupOptions{TestContext: ctx})
- }, time.Minute, time.Second*2).ShouldNot(gomega.HaveOccurred(), "creating localvolume")
-
- // add pv and storageclass cleanup
- addToCleanupFuncs(
- cleanupFuncs,
- "cleanupLVResources",
- func(t *testing.T) error {
- return cleanupLVResources(t, f, localVolume)
- },
- )
- err = waitForDaemonSet(t, f.KubeClient, namespace, nodedaemon.DiskMakerName, retryInterval, timeout)
- if err != nil {
- t.Fatalf("error waiting for diskmaker daemonset : %v", err)
- }
-
- err = verifyLocalVolume(t, localVolume, f.Client)
- if err != nil {
- t.Fatalf("error verifying localvolume cr: %v", err)
+ // Verify we have enough disks for all test cases
+ if len(nodeEnv[0].disks) < 4 {
+ t.Fatalf("expected at least 4 disks, got %d", len(nodeEnv[0].disks))
 }
- // time.Sleep(10 * time.Minute)
- err = checkLocalVolumeStatus(t, localVolume)
- if err != nil {
- t.Fatalf("error checking localvolume condition: %v", err)
- }
-
- pvs := eventuallyFindPVs(t, f, localVolume.Spec.StorageClassDevices[0].StorageClassName, 1)
- var expectedPath string
- if len(pvs) > 0 {
- if selectedDisk.id != "" {
- expectedPath = selectedDisk.id
- } else {
- expectedPath = selectedDisk.name
- }
- } else {
- t.Fatalf("no pvs returned by eventuallyFindPVs: %+v", pvs)
- }
- matcher.Expect(filepath.Base(pvs[0].Spec.Local.Path)).To(gomega.Equal(expectedPath))
-
- // verify pv annotation
- t.Logf("looking for %q annotation on pvs", provCommon.AnnProvisionedBy)
- verifyProvisionerAnnotation(t, pvs, nodeList.Items)
-
- // verify deletion
- for _, pv := range pvs {
- eventuallyDelete(t, false, &pv)
- }
- // verify that PVs come back after deletion
- pvs = eventuallyFindPVs(t, f, localVolume.Spec.StorageClassDevices[0].StorageClassName, 1)
-
- // consume pvs
- consumingObjectList := make([]client.Object, 0)
- for _, pv := range pvs {
- pvc, job, pod := consumePV(t, ctx, pv)
- consumingObjectList = append(consumingObjectList, job, pvc, pod)
+ // Test cases for different FSType configurations and scenarios
+ testCases := []struct {
+ name string
+ fsType string
+ volumeMode localv1.PersistentVolumeMode
+ storageClassName string
+ diskIndex int
+ testFinalizer bool // whether to test finalizer behavior
+ testAnnotations bool // whether to test provisioner annotations
+ testSymlinks bool // whether to test symlink cleanup
+ }{
+ {
+ name: "default-filesystem-with-tolerations",
+ fsType: "",
+ volumeMode: localv1.PersistentVolumeFilesystem,
+ storageClassName: lvStorageClassName,
+ diskIndex: 0,
+ testFinalizer: true,
+ testAnnotations: true,
+ testSymlinks: true,
+ },
+ {
+ name: "ext4-filesystem",
+ fsType: "ext4",
+ volumeMode: localv1.PersistentVolumeFilesystem,
+ storageClassName: "test-local-sc-ext4",
+ diskIndex: 1,
+ testFinalizer: false,
+ testAnnotations: false,
+ testSymlinks: false,
+ },
+ {
+ name: "xfs-filesystem",
+ fsType: "xfs",
+ volumeMode: localv1.PersistentVolumeFilesystem,
+ storageClassName: "test-local-sc-xfs",
+ diskIndex: 2,
+ testFinalizer: false,
+ testAnnotations: false,
+ testSymlinks: false,
+ },
+ {
+ name: "block-mode",
+ fsType: "",
+ volumeMode: localv1.PersistentVolumeBlock,
+ storageClassName: "test-local-sc-block",
+ diskIndex: 3,
+ testFinalizer: false,
+ testAnnotations: false,
+ testSymlinks: false,
+ },
 }
- // release pvs
- eventuallyDelete(t, false, consumingObjectList...)
-
- // verify that PVs eventually come back
- eventuallyFindAvailablePVs(t, f, localVolume.Spec.StorageClassDevices[0].StorageClassName, pvs)
-
- // consume one PV
- consumingObjectList = make([]client.Object, 0)
- addToCleanupFuncs(cleanupFuncs, "pv-consumer", func(t *testing.T) error {
- eventuallyDelete(t, false, consumingObjectList...)
- return nil
- })
- for _, pv := range pvs[:1] {
- pvc, job, pod := consumePV(t, ctx, pv)
- consumingObjectList = append(consumingObjectList, job, pvc, pod)
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ selectedDisk := nodeEnv[0].disks[tc.diskIndex]
+ matcher.Expect(selectedDisk.path).ShouldNot(gomega.BeZero(), "device path should not be empty for "+tc.name)
+
+ var localVolume *localv1.LocalVolume
+ if tc.name == "default-filesystem-with-tolerations" {
+ // Use the original getFakeLocalVolume which includes tolerations
+ localVolume = getFakeLocalVolume(selectedNode, selectedDisk.path, namespace)
+ } else {
+ // Use getFakeLocalVolumeWithFSType for other cases
+ localVolume = getFakeLocalVolumeWithFSType(
+ selectedNode,
+ selectedDisk.path,
+ namespace,
+ tc.storageClassName,
+ tc.fsType,
+ tc.volumeMode,
+ "", // auto-generate name
+ nil, // no tolerations
+ )
+ }
+
+ matcher.Eventually(func() error {
+ t.Logf("creating localvolume %s with fsType=%q, volumeMode=%s", tc.name, tc.fsType, tc.volumeMode)
+ return f.Client.Create(goctx.TODO(), localVolume, &framework.CleanupOptions{TestContext: ctx})
+ }, time.Minute, time.Second*2).ShouldNot(gomega.HaveOccurred(), "creating localvolume "+tc.name)
+
+ // add pv and storageclass cleanup
+ addToCleanupFuncs(
+ cleanupFuncs,
+ fmt.Sprintf("cleanupLVResources-%s", tc.name),
+ func(t *testing.T) error {
+ return cleanupLVResources(t, f, localVolume)
+ },
+ )
+
+ err = waitForDaemonSet(t, f.KubeClient, namespace, nodedaemon.DiskMakerName, retryInterval, timeout)
+ if err != nil {
+ t.Fatalf("error waiting for diskmaker daemonset: %v", err)
+ }
+
+ err = verifyLocalVolume(t, localVolume, f.Client)
+ if err != nil {
+ t.Fatalf("error verifying localvolume cr: %v", err)
+ }
+
+ err = checkLocalVolumeStatus(t, localVolume)
+ if err != nil {
+ t.Fatalf("error checking localvolume condition: %v", err)
+ }
+
+ // Find PVs
+ pvs := eventuallyFindPVs(t, f, tc.storageClassName, 1)
+ if len(pvs) == 0 {
+ t.Fatalf("no pvs returned by eventuallyFindPVs for %s", tc.name)
+ }
+
+ // Verify PV path
+ var expectedPath string
+ if selectedDisk.id != "" {
+ expectedPath = selectedDisk.id
+ } else {
+ expectedPath = selectedDisk.name
+ }
+ matcher.Expect(filepath.Base(pvs[0].Spec.Local.Path)).To(gomega.Equal(expectedPath))
+
+ // Verify PV spec matches expected FSType and VolumeMode
+ pv := pvs[0]
+ t.Logf("verifying PV %s for test case %s", pv.Name, tc.name)
+
+ if tc.volumeMode == localv1.PersistentVolumeBlock {
+ // For block mode, verify volumeMode is Block
+ if pv.Spec.VolumeMode == nil || *pv.Spec.VolumeMode != corev1.PersistentVolumeBlock {
+ t.Fatalf("expected PV volumeMode to be Block, got %v", pv.Spec.VolumeMode)
+ }
+ t.Logf("verified PV %s has volumeMode=Block", pv.Name)
+ } else {
+ // For filesystem mode, verify fsType if specified
+ if tc.fsType != "" {
+ if pv.Spec.Local.FSType == nil || *pv.Spec.Local.FSType != tc.fsType {
+ t.Fatalf("expected PV fsType to be %s, got %v", tc.fsType, pv.Spec.Local.FSType)
+ }
+ t.Logf("verified PV %s has fsType=%s", pv.Name, tc.fsType)
+ }
+ }
+
+ // Verify StorageClass exists (Local Storage Operator uses kubernetes.io/no-provisioner,
+ // so FSType is set on PV spec directly, not in StorageClass parameters)
+ _, err = f.KubeClient.StorageV1().StorageClasses().Get(goctx.TODO(), tc.storageClassName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("error getting storage class %s: %v", tc.storageClassName, err)
+ }
+ t.Logf("verified StorageClass %s exists", tc.storageClassName)
+
+ // Test provisioner annotations (only for default test)
+ if tc.testAnnotations {
+ t.Logf("looking for %q annotation on pvs", provCommon.AnnProvisionedBy)
+ verifyProvisionerAnnotation(t, pvs, nodeList.Items)
+ }
+
+ // Test PV deletion and recreation
+ for _, pv := range pvs {
+ eventuallyDelete(t, false, &pv)
+ }
+ // verify that PVs come back after deletion
+ pvs = eventuallyFindPVs(t, f, tc.storageClassName, 1)
+
+ // consume pvs
+ consumingObjectList := make([]client.Object, 0)
+ for _, pv := range pvs {
+ pvc, job, pod := consumePV(t, ctx, pv)
+ consumingObjectList = append(consumingObjectList, job, pvc, pod)
+ }
+ // release pvs
+ eventuallyDelete(t, false, consumingObjectList...)
+
+ // verify that PVs eventually come back
+ eventuallyFindAvailablePVs(t, f, tc.storageClassName, pvs)
+
+ // Test finalizer behavior (only for default test)
+ if tc.testFinalizer {
+ // consume one PV
+ consumingObjectList = make([]client.Object, 0)
+
+ addToCleanupFuncs(cleanupFuncs, "pv-consumer", func(t *testing.T) error {
+ eventuallyDelete(t, false, consumingObjectList...)
+ return nil
+ })
+ for _, pv := range pvs[:1] {
+ pvc, job, pod := consumePV(t, ctx, pv)
+ consumingObjectList = append(consumingObjectList, job, pvc, pod)
+ }
+
+ // attempt localVolume deletion
+ matcher.Eventually(func() error {
+ t.Logf("deleting LocalVolume %q", localVolume.Name)
+ return f.Client.Delete(context.TODO(), localVolume, client.PropagationPolicy(metav1.DeletePropagationBackground))
+ }, time.Minute*5, time.Second*5).ShouldNot(gomega.HaveOccurred(), "deleting LocalVolume %q", localVolume.Name)
+
+ // verify finalizer not removed while bound pvs exist
+ matcher.Consistently(func() bool {
+ t.Logf("verifying finalizer still exists")
+ err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: localVolume.Name, Namespace: f.OperatorNamespace}, localVolume)
+ if err != nil && (errors.IsGone(err) || errors.IsNotFound(err)) {
+ t.Fatalf("LocalVolume deleted with bound PVs")
+ return false
+ } else if err != nil {
+ t.Logf("error getting LocalVolume: %+v", err)
+ return false
+ }
+ return len(localVolume.ObjectMeta.Finalizers) > 0
+ }, time.Second*15, time.Second*3).Should(gomega.BeTrue(), "checking finalizer exists with bound PVs")
+
+ // release PV
+ t.Logf("releasing pvs")
+ eventuallyDelete(t, false, consumingObjectList...)
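+
+ // With the consumers gone, the PVs are released; the operator can then
+ // drop its finalizer and the Delete issued above can complete.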
+ } else { + // For non-finalizer tests, just delete the LocalVolume + matcher.Eventually(func() error { + t.Logf("deleting LocalVolume %q", localVolume.Name) + return f.Client.Delete(context.TODO(), localVolume, client.PropagationPolicy(metav1.DeletePropagationBackground)) + }, time.Minute*5, time.Second*5).ShouldNot(gomega.HaveOccurred(), "deleting LocalVolume %q", localVolume.Name) + } + + // verify localVolume deletion + matcher.Eventually(func() bool { + t.Log("verifying LocalVolume deletion") + err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: localVolume.Name, Namespace: f.OperatorNamespace}, localVolume) + if err != nil && (errors.IsGone(err) || errors.IsNotFound(err)) { + t.Logf("LocalVolume deleted: %+v", err) + return true + } else if err != nil { + t.Logf("error getting LocalVolume: %+v", err) + return false + } + t.Logf("LocalVolume found: %q with finalizers: %+v", localVolume.Name, localVolume.ObjectMeta.Finalizers) + return false + }, time.Minute*5, time.Second*5).Should(gomega.BeTrue(), "verifying LocalVolume %s has been deleted", tc.name) + + // Check for leftover symlinks (only for default test) + if tc.testSymlinks { + symLinkPath := path.Join(common.GetLocalDiskLocationPath(), tc.storageClassName) + checkForSymlinks(t, ctx, nodeEnv, symLinkPath) + } + }) } - // attempt localVolume deletion - matcher.Eventually(func() error { - t.Logf("deleting LocalVolume %q", localVolume.Name) - return f.Client.Delete(context.TODO(), localVolume, client.PropagationPolicy(metav1.DeletePropagationBackground)) - }, time.Minute*5, time.Second*5).ShouldNot(gomega.HaveOccurred(), "deleting LocalVolume %q", localVolume.Name) - - // verify finalizer not removed with while bound pvs exists - matcher.Consistently(func() bool { - t.Logf("verifying finalizer still exists") - err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: localVolume.Name, Namespace: f.OperatorNamespace}, localVolume) - if err != nil && (errors.IsGone(err) || errors.IsNotFound(err)) { - t.Fatalf("LocalVolume deleted with bound PVs") - return false - } else if err != nil { - t.Logf("error getting LocalVolume: %+v", err) - return false - } - return len(localVolume.ObjectMeta.Finalizers) > 0 - }, time.Second*15, time.Second*3).Should(gomega.BeTrue(), "checking finalizer exists with bound PVs") - // release PV - t.Logf("releasing pvs") - eventuallyDelete(t, false, consumingObjectList...) 
- // verify localVolume deletion - matcher.Eventually(func() bool { - t.Log("verifying LocalVolume deletion") - err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: localVolume.Name, Namespace: f.OperatorNamespace}, localVolume) - if err != nil && (errors.IsGone(err) || errors.IsNotFound(err)) { - t.Logf("LocalVolume deleted: %+v", err) - return true - } else if err != nil { - t.Logf("error getting LocalVolume: %+v", err) - return false - } - t.Logf("LocalVolume found: %q with finalizers: %+v", localVolume.Name, localVolume.ObjectMeta.Finalizers) - return false - }).Should(gomega.BeTrue(), "verifying LocalVolume has been deleted", localVolume.Name) - - // check for leftover symlinks before cleanup - symLinkPath := path.Join(common.GetLocalDiskLocationPath(), lvStorageClassName) - checkForSymlinks(t, ctx, nodeEnv, symLinkPath) } } @@ -511,42 +638,23 @@ func waitForNodeTaintUpdate(t *testing.T, kubeclient kubernetes.Interface, node } func getFakeLocalVolume(selectedNode v1.Node, selectedDisk, namespace string) *localv1.LocalVolume { - localVolume := &localv1.LocalVolume{ - TypeMeta: metav1.TypeMeta{ - Kind: "LocalVolume", - APIVersion: localv1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-local-disk", - Namespace: namespace, - }, - Spec: localv1.LocalVolumeSpec{ - NodeSelector: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchFields: []v1.NodeSelectorRequirement{ - {Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{selectedNode.Name}}, - }, - }, - }, - }, - Tolerations: []v1.Toleration{ - { - Key: "localstorage", - Value: "testvalue", - Operator: "Equal", - }, - }, - StorageClassDevices: []localv1.StorageClassDevice{ - { - StorageClassName: lvStorageClassName, - DevicePaths: []string{selectedDisk}, - }, + // Use the flexible helper with default values for backward compatibility + return getFakeLocalVolumeWithFSType( + selectedNode, + selectedDisk, + namespace, + lvStorageClassName, + "", // default: no fsType specified + localv1.PersistentVolumeFilesystem, // default: Filesystem mode + "test-local-disk", // default name + []v1.Toleration{ // default tolerations + { + Key: "localstorage", + Value: "testvalue", + Operator: "Equal", }, }, - } - - return localVolume + ) } func deleteResource(obj client.Object, namespace, name string, client framework.FrameworkClient) error { @@ -573,3 +681,58 @@ func deleteResource(obj client.Object, namespace, name string, client framework. 
 })
 return waitErr
 }
+
+// getFakeLocalVolumeWithFSType creates a LocalVolume CR with specific FSType and VolumeMode
+// Parameters:
+// - selectedNode: the node to deploy on
+// - selectedDisk: the disk device path
+// - namespace: the namespace for the LocalVolume
+// - storageClassName: the storage class name
+// - fsType: filesystem type (e.g. ext4 or xfs); empty string leaves FSType unset, and it is ignored in block mode
+// - volumeMode: PersistentVolumeFilesystem or PersistentVolumeBlock
+// - name: the name of the LocalVolume CR (if empty, generates from storageClassName)
+// - tolerations: tolerations to apply (can be nil)
+func getFakeLocalVolumeWithFSType(selectedNode v1.Node, selectedDisk, namespace, storageClassName, fsType string, volumeMode localv1.PersistentVolumeMode, name string, tolerations []v1.Toleration) *localv1.LocalVolume {
+ // Generate name if not provided
+ if name == "" {
+ name = fmt.Sprintf("test-local-disk-%s", storageClassName)
+ }
+
+ // Build StorageClassDevice
+ scd := localv1.StorageClassDevice{
+ StorageClassName: storageClassName,
+ DevicePaths: []string{selectedDisk},
+ VolumeMode: volumeMode,
+ }
+
+ // Only set FSType for filesystem mode, not for block mode
+ if volumeMode != localv1.PersistentVolumeBlock && fsType != "" {
+ scd.FSType = fsType
+ }
+
+ localVolume := &localv1.LocalVolume{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LocalVolume",
+ APIVersion: localv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: localv1.LocalVolumeSpec{
+ NodeSelector: &v1.NodeSelector{
+ NodeSelectorTerms: []v1.NodeSelectorTerm{
+ {
+ MatchFields: []v1.NodeSelectorRequirement{
+ {Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{selectedNode.Name}},
+ },
+ },
+ },
+ },
+ Tolerations: tolerations,
+ StorageClassDevices: []localv1.StorageClassDevice{scd},
+ },
+ }
+
+ return localVolume
+}
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index 11bc09d34..96fa2d7cf 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -19,10 +19,7 @@ import (
 var (
 retryInterval = time.Second * 5
- // timeout = time.Second * 120
- timeout = time.Hour
- cleanupRetryInterval = time.Second * 1
- cleanupTimeout = time.Second * 5
+ timeout = time.Hour * 1
 // testmap is a map from test-name to test-wrapper.
 // test-wrapper returns a func that can be run with t.Run(func(*testing.T), but