From a2a5231b4be04685d33b047be60c014210ffdc23 Mon Sep 17 00:00:00 2001 From: Lubron Zhan Date: Wed, 19 Nov 2025 23:54:30 -0800 Subject: [PATCH] Sort volumes by targetID --- api/v1alpha5/virtualmachine_storage_types.go | 30 +-- .../volume/volume_controller.go | 8 +- .../volumebatch/volumebatch_controller.go | 46 ++-- .../volumebatch_controller_unit_test.go | 79 ++++-- .../vsphere/vmlifecycle/update_status.go | 7 +- .../vsphere/vmlifecycle/update_status_test.go | 231 ++++++++++++------ pkg/util/volumes/volumes_util.go | 18 ++ 7 files changed, 290 insertions(+), 129 deletions(-) diff --git a/api/v1alpha5/virtualmachine_storage_types.go b/api/v1alpha5/virtualmachine_storage_types.go index 5b75de5d1..abd9a4c2c 100644 --- a/api/v1alpha5/virtualmachine_storage_types.go +++ b/api/v1alpha5/virtualmachine_storage_types.go @@ -5,8 +5,6 @@ package v1alpha5 import ( - "slices" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -342,19 +340,21 @@ type VirtualMachineVolumeStatus struct { Error string `json:"error,omitempty"` } -// SortVirtualMachineVolumeStatuses sorts the provided list of -// VirtualMachineVolumeStatus objects. -func SortVirtualMachineVolumeStatuses(s []VirtualMachineVolumeStatus) { - slices.SortFunc(s, func(a, b VirtualMachineVolumeStatus) int { - switch { - case a.DiskUUID < b.DiskUUID: - return -1 - case a.DiskUUID > b.DiskUUID: - return 1 - default: - return 0 - } - }) +// GetControllerType returns the type of controller to which the disk is +// attached. +func (v VirtualMachineVolumeStatus) GetControllerType() VirtualControllerType { + return v.ControllerType +} + +// GetControllerBusNumber returns the bus number of the controller to which the +// disk is connected. +func (v VirtualMachineVolumeStatus) GetControllerBusNumber() *int32 { + return v.ControllerBusNumber +} + +// GetUnitNumber returns the disk's unit number. 
+func (v VirtualMachineVolumeStatus) GetUnitNumber() *int32 { + return v.UnitNumber } // VirtualMachineStorageStatus defines the observed state of a VirtualMachine's diff --git a/controllers/virtualmachine/volume/volume_controller.go b/controllers/virtualmachine/volume/volume_controller.go index 0dcd36220..1a52923cd 100644 --- a/controllers/virtualmachine/volume/volume_controller.go +++ b/controllers/virtualmachine/volume/volume_controller.go @@ -45,6 +45,7 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/record" pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1" + pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes" ) // AddToManager adds this package's controller to the provided manager. @@ -786,9 +787,10 @@ func (r *Reconciler) processAttachments( // Update the existing status with the new list of managed volumes. ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, volumeStatuses...) - // This is how the previous code sorted, but IMO keeping in Spec order makes - // more sense. - vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes) + pkgvol.SortVirtualMachineVolumeStatuses( + ctx.VM.Status.Volumes, + true, // Sort by UUID. 
+ ) return apierrorsutil.NewAggregate(createErrs) } diff --git a/controllers/virtualmachine/volumebatch/volumebatch_controller.go b/controllers/virtualmachine/volumebatch/volumebatch_controller.go index 8e414808a..75eebe289 100644 --- a/controllers/virtualmachine/volumebatch/volumebatch_controller.go +++ b/controllers/virtualmachine/volumebatch/volumebatch_controller.go @@ -41,6 +41,7 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants" "github.com/vmware-tanzu/vm-operator/pkg/record" pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" + pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes" ) const ( @@ -659,16 +660,10 @@ func (r *Reconciler) getVMVolStatusesFromBatchAttachment( vol.PersistentVolumeClaim.ClaimName == volStatus.PersistentVolumeClaim.ClaimName { vmVolStatus = attachmentStatusToVolumeStatus(volStatus.Name, volStatus) - existingVol := existingVMManagedVolStatus[vol.Name] - vmVolStatus.Used = existingVol.Used - vmVolStatus.Crypto = existingVol.Crypto - vmVolStatus.ControllerType = existingVol.ControllerType - vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber - vmVolStatus.UnitNumber = existingVol.UnitNumber - vmVolStatus.DiskMode = existingVol.DiskMode - vmVolStatus.SharingMode = existingVol.SharingMode - - // Add PVC capacity information + + updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus) + + // Add PVC capacity information. if err := r.updateVolumeStatusWithPVCInfo( ctx, volStatus.PersistentVolumeClaim.ClaimName, @@ -1029,14 +1024,8 @@ func (r *Reconciler) getVMVolStatusesFromLegacyAttachments( // vm.status.vol and legacyAttachment. 
if vol.PersistentVolumeClaim.ClaimName == att.Spec.VolumeName { vmVolStatus := legacyAttachmentToVolumeStatus(vol.Name, att) - existingVol := existingVMManagedVolStatus[vol.Name] - vmVolStatus.Used = existingVol.Used - vmVolStatus.Crypto = existingVol.Crypto - vmVolStatus.ControllerType = existingVol.ControllerType - vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber - vmVolStatus.UnitNumber = existingVol.UnitNumber - vmVolStatus.DiskMode = existingVol.DiskMode - vmVolStatus.SharingMode = existingVol.SharingMode + + updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus) // Add PVC capacity information if err := r.updateVolumeStatusWithPVCInfo( @@ -1096,7 +1085,10 @@ func updateVMVolumeStatus( ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, v2...) // Sort the volume statuses to ensure consistent ordering. - vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes) + pkgvol.SortVirtualMachineVolumeStatuses( + ctx.VM.Status.Volumes, + false, // Sort by TargetID. + ) } // categorizeVolumeSpecs categorizes the volume specs into two categories: @@ -1147,3 +1139,19 @@ func categorizeVolumeSpecs( } return volumeSpecsForBatch, volumeSpecsForLegacy } + +// Update the target vmVolStatus with info from existing VM vol status. 
+func updateVolumeStatusWithExistingVMStatus( + vmVolStatus *vmopv1.VirtualMachineVolumeStatus, + existingVMManagedVolStatusMap map[string]vmopv1.VirtualMachineVolumeStatus) { + + existingVolStatus := existingVMManagedVolStatusMap[vmVolStatus.Name] + + vmVolStatus.Used = existingVolStatus.Used + vmVolStatus.Crypto = existingVolStatus.Crypto + vmVolStatus.ControllerType = existingVolStatus.ControllerType + vmVolStatus.ControllerBusNumber = existingVolStatus.ControllerBusNumber + vmVolStatus.UnitNumber = existingVolStatus.UnitNumber + vmVolStatus.DiskMode = existingVolStatus.DiskMode + vmVolStatus.SharingMode = existingVolStatus.SharingMode +} diff --git a/controllers/virtualmachine/volumebatch/volumebatch_controller_unit_test.go b/controllers/virtualmachine/volumebatch/volumebatch_controller_unit_test.go index 15c96b856..cb16833ae 100644 --- a/controllers/virtualmachine/volumebatch/volumebatch_controller_unit_test.go +++ b/controllers/virtualmachine/volumebatch/volumebatch_controller_unit_test.go @@ -941,8 +941,8 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() { Expect(vm.Status.Volumes).To(HaveLen(2)) Expect(attachment.Status.VolumeStatus).To(HaveLen(2)) - assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 0, false) - assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 1, false) + assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false) + assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false) }) }) @@ -950,23 +950,29 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type classicDisk1 := func() vmopv1.VirtualMachineVolumeStatus { return vmopv1.VirtualMachineVolumeStatus{ - Name: "my-disk-0", - Type: vmopv1.VolumeTypeClassic, - Limit: ptr.To(resource.MustParse("10Gi")), - Used: ptr.To(resource.MustParse("91Gi")), - Attached: true, - DiskUUID: "100", + Name: "my-disk-0", + Type: 
vmopv1.VolumeTypeClassic, + Limit: ptr.To(resource.MustParse("10Gi")), + Used: ptr.To(resource.MustParse("91Gi")), + Attached: true, + DiskUUID: "100", + ControllerBusNumber: ptr.To(int32(1)), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + UnitNumber: ptr.To(int32(0)), } } classicDisk2 := func() vmopv1.VirtualMachineVolumeStatus { return vmopv1.VirtualMachineVolumeStatus{ - Name: "my-disk-1", - Type: vmopv1.VolumeTypeClassic, - Limit: ptr.To(resource.MustParse("15Gi")), - Used: ptr.To(resource.MustParse("5Gi")), - Attached: true, - DiskUUID: "101", + Name: "my-disk-1", + Type: vmopv1.VolumeTypeClassic, + Limit: ptr.To(resource.MustParse("15Gi")), + Used: ptr.To(resource.MustParse("5Gi")), + Attached: true, + DiskUUID: "101", + ControllerBusNumber: ptr.To(int32(1)), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + UnitNumber: ptr.To(int32(1)), } } @@ -990,15 +996,17 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type attachment := getCNSBatchAttachmentForVolumeName(ctx, vm) Expect(attachment).ToNot(BeNil()) - By("VM Status.Volumes are sorted by DiskUUID", func() { + By("VM Status.Volumes are stable sorted by Target ID", func() { Expect(vm.Status.Volumes).To(HaveLen(4)) Expect(attachment.Status.VolumeStatus).To(HaveLen(2)) - Expect(vm.Status.Volumes[0]).To(Equal(classicDisk1())) - Expect(vm.Status.Volumes[1]).To(Equal(classicDisk2())) + // Classic disks have target ID. + Expect(vm.Status.Volumes[2]).To(Equal(classicDisk1())) + Expect(vm.Status.Volumes[3]).To(Equal(classicDisk2())) - assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 2, 1, false) - assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 3, 0, false) + // Empty target ID so ranked before Classic disks. 
+ assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false) + assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false) }) } @@ -1021,7 +1029,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type GinkgoHelper() - Expect(vm.Status.Volumes[3].Used).To(Equal(ptr.To(resource.MustParse("1Gi")))) + Expect(vm.Status.Volumes[0].Used).To(Equal(ptr.To(resource.MustParse("1Gi")))) } It("includes the PVC usage in the result", func() { @@ -1052,7 +1060,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type GinkgoHelper() - Expect(vm.Status.Volumes[3].Limit).To(Equal(ptr.To(resource.MustParse("20Gi")))) + Expect(vm.Status.Volumes[0].Limit).To(Equal(ptr.To(resource.MustParse("20Gi")))) } When("PVC has limit", func() { @@ -1112,7 +1120,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type GinkgoHelper() - Expect(vm.Status.Volumes[3].Crypto).To(Equal(newCryptoStatus())) + Expect(vm.Status.Volumes[0].Crypto).To(Equal(newCryptoStatus())) } BeforeEach(func() { @@ -1130,6 +1138,31 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type assertPVCHasCrypto() }) }) + + When("Existing status has Controller info", func() { + BeforeEach(func() { + vm.Status.Volumes = append(vm.Status.Volumes, + vmopv1.VirtualMachineVolumeStatus{ + Name: vmVol1.Name, + Type: vmopv1.VolumeTypeManaged, + ControllerBusNumber: ptr.To(int32(0)), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + UnitNumber: ptr.To(int32(0)), + }, + vmopv1.VirtualMachineVolumeStatus{ + Name: vmVol2.Name, + Type: vmopv1.VolumeTypeManaged, + ControllerBusNumber: ptr.To(int32(0)), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + UnitNumber: ptr.To(int32(1)), + }, + ) + }) + + It("include the Controller info and sort by controller info", func() { + assertBaselineVolStatus() + }) + }) }) }) }) @@ -1360,7 +1393,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n 
}\\n },\\n Type attachment := getCNSBatchAttachmentForVolumeName(ctx, vm) Expect(attachment).ToNot(BeNil()) - By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() { + By("VM Status.Volumes are stable-sorted by Spec.Volumes order, and attached should be False", func() { Expect(vm.Status.Volumes).To(HaveLen(1)) Expect(attachment.Status.VolumeStatus).To(HaveLen(1)) assertVMVolStatusFromBatchAttachmentSpec(vm, attachment, 0, 0) diff --git a/pkg/providers/vsphere/vmlifecycle/update_status.go b/pkg/providers/vsphere/vmlifecycle/update_status.go index 3dce7b4a5..2882a3ec2 100644 --- a/pkg/providers/vsphere/vmlifecycle/update_status.go +++ b/pkg/providers/vsphere/vmlifecycle/update_status.go @@ -1438,7 +1438,12 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) { }) // This sort order is consistent with the logic from the volumes controller. - vmopv1.SortVirtualMachineVolumeStatuses(vm.Status.Volumes) + // If VMSharedDisks is enabled, sort by TargetID. + sortByUUID := !pkgcfg.FromContext(vmCtx).Features.VMSharedDisks + pkgvol.SortVirtualMachineVolumeStatuses( + vm.Status.Volumes, + sortByUUID, + ) } type probeResult uint8 diff --git a/pkg/providers/vsphere/vmlifecycle/update_status_test.go b/pkg/providers/vsphere/vmlifecycle/update_status_test.go index 7cd4f89a9..196d6ab33 100644 --- a/pkg/providers/vsphere/vmlifecycle/update_status_test.go +++ b/pkg/providers/vsphere/vmlifecycle/update_status_test.go @@ -1112,6 +1112,16 @@ var _ = Describe("UpdateStatus", func() { }, }, }, + &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + BusNumber: 1, + VirtualDevice: vimtypes.VirtualDevice{ + Key: 201, + }, + }, + }, + }, // classic &vimtypes.VirtualDisk{ @@ -1143,8 +1153,9 @@ var _ = Describe("UpdateStatus", func() { }, Uuid: "101", }, - Key: 101, - UnitNumber: ptr.To[int32](0), + Key: 101, + ControllerKey: 200, + UnitNumber: ptr.To[int32](0), }, 
CapacityInBytes: 1 * oneGiBInBytes, }, @@ -1157,8 +1168,9 @@ var _ = Describe("UpdateStatus", func() { }, Uuid: "102", }, - Key: 102, - UnitNumber: ptr.To[int32](0), + Key: 102, + ControllerKey: 200, + UnitNumber: ptr.To[int32](1), }, CapacityInBytes: 2 * oneGiBInBytes, }, @@ -1171,8 +1183,9 @@ var _ = Describe("UpdateStatus", func() { }, Uuid: "103", }, - Key: 103, - UnitNumber: ptr.To[int32](0), + Key: 103, + ControllerKey: 201, + UnitNumber: ptr.To[int32](0), }, CapacityInBytes: 3 * oneGiBInBytes, }, @@ -1431,70 +1444,152 @@ var _ = Describe("UpdateStatus", func() { }) When("target id is in volume", func() { - BeforeEach(func() { - pkgcfg.SetContext(vmCtx, func(config *pkgcfg.Config) { - config.Features.AllDisksArePVCs = true + When("AllDisksArePVCs is enabled", func() { + BeforeEach(func() { + pkgcfg.SetContext(vmCtx, func(config *pkgcfg.Config) { + config.Features.AllDisksArePVCs = true + }) + }) + Specify("status.volumes includes the classic disks, volumes are sorted by UUID", func() { + Expect(vmCtx.VM.Status.Volumes).To(Equal([]vmopv1.VirtualMachineVolumeStatus{ + { + Name: pkgutil.GeneratePVCName("disk", "100"), + DiskUUID: "100", + Type: vmopv1.VolumeTypeClassic, + Crypto: &vmopv1.VirtualMachineVolumeCryptoStatus{ + KeyID: "my-key-id", + ProviderID: "my-provider-id", + }, + Attached: true, + Limit: kubeutil.BytesToResource(10 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(10 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](3), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "101"), + DiskUUID: "101", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(1 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(1 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (0.25 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), + ControllerType: 
vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "102"), + DiskUUID: "102", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(2 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(2 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (0.5 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](1), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "103"), + DiskUUID: "103", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(3 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(3 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](1), + }, + { + Name: pkgutil.GeneratePVCName("disk", "104"), + DiskUUID: "104", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(4 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(4 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (2 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), + }, + })) }) }) - Specify("status.volumes includes the classic disks", func() { - Expect(vmCtx.VM.Status.Volumes).To(Equal([]vmopv1.VirtualMachineVolumeStatus{ - { - Name: pkgutil.GeneratePVCName("disk", "100"), - DiskUUID: "100", - Type: vmopv1.VolumeTypeClassic, - Crypto: &vmopv1.VirtualMachineVolumeCryptoStatus{ - KeyID: "my-key-id", - ProviderID: "my-provider-id", + + When("VMSharedDisks is enabled", func() { + BeforeEach(func() { + pkgcfg.SetContext(vmCtx, func(config *pkgcfg.Config) { + config.Features.VMSharedDisks = true + }) + }) + Specify("status.volumes includes the classic disks, volumes are sorted by TargetID", func() { + Expect(vmCtx.VM.Status.Volumes).To(Equal([]vmopv1.VirtualMachineVolumeStatus{ + { + Name: 
pkgutil.GeneratePVCName("disk", "104"), + DiskUUID: "104", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(4 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(4 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (2 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), }, - Attached: true, - Limit: kubeutil.BytesToResource(10 * oneGiBInBytes), - Requested: kubeutil.BytesToResource(10 * oneGiBInBytes), - Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), - UnitNumber: ptr.To[int32](3), - ControllerType: vmopv1.VirtualControllerTypeSCSI, - ControllerBusNumber: ptr.To[int32](0), - }, - { - Name: pkgutil.GeneratePVCName("disk", "101"), - DiskUUID: "101", - Type: vmopv1.VolumeTypeClassic, - Attached: true, - Limit: kubeutil.BytesToResource(1 * oneGiBInBytes), - Requested: kubeutil.BytesToResource(1 * oneGiBInBytes), - Used: kubeutil.BytesToResource(500 + (0.25 * oneGiBInBytes)), - UnitNumber: ptr.To[int32](0), - }, - { - Name: pkgutil.GeneratePVCName("disk", "102"), - DiskUUID: "102", - Type: vmopv1.VolumeTypeClassic, - Attached: true, - Limit: kubeutil.BytesToResource(2 * oneGiBInBytes), - Requested: kubeutil.BytesToResource(2 * oneGiBInBytes), - Used: kubeutil.BytesToResource(500 + (0.5 * oneGiBInBytes)), - UnitNumber: ptr.To[int32](0), - }, - { - Name: pkgutil.GeneratePVCName("disk", "103"), - DiskUUID: "103", - Type: vmopv1.VolumeTypeClassic, - Attached: true, - Limit: kubeutil.BytesToResource(3 * oneGiBInBytes), - Requested: kubeutil.BytesToResource(3 * oneGiBInBytes), - Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), - UnitNumber: ptr.To[int32](0), - }, - { - Name: pkgutil.GeneratePVCName("disk", "104"), - DiskUUID: "104", - Type: vmopv1.VolumeTypeClassic, - Attached: true, - Limit: kubeutil.BytesToResource(4 * oneGiBInBytes), - Requested: kubeutil.BytesToResource(4 * oneGiBInBytes), - Used: kubeutil.BytesToResource(500 + (2 * oneGiBInBytes)), - UnitNumber: ptr.To[int32](0), - }, - })) + { + Name: 
pkgutil.GeneratePVCName("disk", "101"), + DiskUUID: "101", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(1 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(1 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (0.25 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "102"), + DiskUUID: "102", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(2 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(2 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (0.5 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](1), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "100"), + DiskUUID: "100", + Type: vmopv1.VolumeTypeClassic, + Crypto: &vmopv1.VirtualMachineVolumeCryptoStatus{ + KeyID: "my-key-id", + ProviderID: "my-provider-id", + }, + Attached: true, + Limit: kubeutil.BytesToResource(10 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(10 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](3), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](0), + }, + { + Name: pkgutil.GeneratePVCName("disk", "103"), + DiskUUID: "103", + Type: vmopv1.VolumeTypeClassic, + Attached: true, + Limit: kubeutil.BytesToResource(3 * oneGiBInBytes), + Requested: kubeutil.BytesToResource(3 * oneGiBInBytes), + Used: kubeutil.BytesToResource(500 + (1 * oneGiBInBytes)), + UnitNumber: ptr.To[int32](0), + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To[int32](1), + }, + })) + }) }) }) }) @@ -1730,7 +1825,7 @@ var _ = Describe("UpdateStatus", func() { When("there is a classic disk w an invalid path", func() { BeforeEach(func() { 
vmCtx.MoVM.Config.Hardware. - Device[1].(*vimtypes.VirtualDisk). + Device[2].(*vimtypes.VirtualDisk). Backing.(*vimtypes.VirtualDiskFlatVer2BackingInfo). FileName = "invalid" }) diff --git a/pkg/util/volumes/volumes_util.go b/pkg/util/volumes/volumes_util.go index 14f43b4e3..e39b68645 100644 --- a/pkg/util/volumes/volumes_util.go +++ b/pkg/util/volumes/volumes_util.go @@ -5,6 +5,7 @@ package volumes import ( + "cmp" "context" "fmt" "slices" @@ -282,3 +283,20 @@ func FromContext(ctx context.Context) (VolumeInfo, bool) { val, ok := obj.(VolumeInfo) return val, ok } + +// SortVirtualMachineVolumeStatuses sorts the provided list of +// VirtualMachineVolumeStatus objects. If byUUID is true, sort by +// UUID as the old behavior. Otherwise sort by TargetID. +func SortVirtualMachineVolumeStatuses(s []vmopv1.VirtualMachineVolumeStatus, byUUID bool) { + keyExtractor := func(v vmopv1.VirtualMachineVolumeStatus) string { + if byUUID { + return v.DiskUUID + } + return vmopv1util.GetTargetID(v) + } + + // Use slices.SortStableFunc with the key extractor to perform the comparison. + slices.SortStableFunc(s, func(a, b vmopv1.VirtualMachineVolumeStatus) int { + return cmp.Compare(keyExtractor(a), keyExtractor(b)) + }) +}