From 424de3d9bad0086ed8cac100679fbc3142d4338b Mon Sep 17 00:00:00 2001
From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com>
Date: Fri, 5 Dec 2025 07:38:57 -0800
Subject: [PATCH 1/4] support multiple share rules (#3023)

Signed-off-by: moonek
Signed-off-by: Carlos da Silva
Co-authored-by: moonek
---
 .../using-manila-csi-plugin.md                |   7 +-
 .../preprovisioned-pvc.yaml                   |   2 +-
 pkg/csi/manila/adapters.go                    |  39 +++++++
 pkg/csi/manila/controllerserver.go            |  12 +-
 pkg/csi/manila/nodeserver.go                  |  13 +--
 pkg/csi/manila/options/shareoptions.go        |   7 +-
 pkg/csi/manila/shareadapters/cephfs.go        | 107 +++++++++---------
 pkg/csi/manila/shareadapters/nfs.go           |  39 ++++---
 pkg/csi/manila/shareadapters/shareadapter.go  |   7 +-
 tests/e2e/csi/manila/testdriver.go            |   4 +-
 10 files changed, 140 insertions(+), 97 deletions(-)

diff --git a/docs/manila-csi-plugin/using-manila-csi-plugin.md b/docs/manila-csi-plugin/using-manila-csi-plugin.md
index 6379e29ab8..29be4ac5b6 100644
--- a/docs/manila-csi-plugin/using-manila-csi-plugin.md
+++ b/docs/manila-csi-plugin/using-manila-csi-plugin.md
@@ -60,8 +60,8 @@ Parameter | Required | Description
 `cephfs-mounter` | _no_ | Relevant for CephFS Manila shares. Specifies which mounting method to use with the CSI CephFS driver. Available options are `kernel` and `fuse`, defaults to `fuse`. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
 `cephfs-kernelMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS kernel client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
 `cephfs-fuseMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS FUSE client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
-`cephfs-clientID` | _no_ | Relevant for CephFS Manila shares. Specifies the cephx client ID when creating an access rule for the provisioned share. The same cephx client ID may be shared with multiple Manila shares. If no value is provided, client ID for the provisioned Manila share will be set to some unique value (PersistentVolume name).
-`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Defaults to `0.0.0.0/0`, i.e. anyone.
+`cephfs-clientID` | _no_ | Relevant for CephFS Manila shares. Specifies the cephx client ID when creating an access rule for the provisioned share. The same cephx client ID may be shared with multiple Manila shares. To grant access to multiple cephx client IDs, set this to a comma-separated list. If no value is provided, the client ID for the provisioned Manila share will be set to a unique value (the PersistentVolume name).
+`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Use a comma-separated list to grant access to multiple IP addresses or subnets. Defaults to `0.0.0.0/0`, i.e. anyone.
 
 ### Node Service volume context
 
@@ -71,7 +71,8 @@ Parameter | Required | Description
 ----------|----------|------------
 `shareID` | if `shareName` is not given | The UUID of the share
 `shareName` | if `shareID` is not given | The name of the share
-`shareAccessID` | _yes_ | The UUID of the access rule for the share
+`shareAccessID` | _no_ | The UUID of the access rule for the share. This parameter is deprecated and replaced by `shareAccessIDs`.
+`shareAccessIDs` | _yes_ | Comma-separated UUIDs of access rules for the share
 `cephfs-mounter` | _no_ | Relevant for CephFS Manila shares. Specifies which mounting method to use with the CSI CephFS driver. Available options are `kernel` and `fuse`, defaults to `fuse`. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
 `cephfs-kernelMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS kernel client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
 `cephfs-fuseMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS FUSE client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
diff --git a/examples/manila-csi-plugin/nfs/static-provisioning/preprovisioned-pvc.yaml b/examples/manila-csi-plugin/nfs/static-provisioning/preprovisioned-pvc.yaml
index 3b572d1ebf..4519e8fcc8 100644
--- a/examples/manila-csi-plugin/nfs/static-provisioning/preprovisioned-pvc.yaml
+++ b/examples/manila-csi-plugin/nfs/static-provisioning/preprovisioned-pvc.yaml
@@ -20,7 +20,7 @@ spec:
         namespace: default
       volumeAttributes:
         shareID: SHARE-UUID-GOES-HERE
-        shareAccessID: ACCESS-UUID-OF-THE-SHARE
+        shareAccessIDs: COMMA-SEPARATED-ACCESS-UUIDS-OF-THE-SHARE
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
diff --git a/pkg/csi/manila/adapters.go b/pkg/csi/manila/adapters.go
index a41186e691..d60b8e7afa 100644
--- a/pkg/csi/manila/adapters.go
+++ b/pkg/csi/manila/adapters.go
@@ -19,6 +19,8 @@ package manila
 import (
 	"strings"
 
+	"github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/shares"
+
 	"k8s.io/cloud-provider-openstack/pkg/csi/manila/options"
 	"k8s.io/cloud-provider-openstack/pkg/csi/manila/shareadapters"
 	"k8s.io/klog/v2"
 )
@@ -35,3 +37,40 @@ func getShareAdapter(proto string) shareadapters.ShareAdapter {
 
 	return nil
 }
+
+func getAccessIDs(shareOpts *options.NodeVolumeContext) []string {
+	if shareOpts.ShareAccessIDs != "" {
+		// Split by comma if multiple
+		return strings.Split(shareOpts.ShareAccessIDs, ",")
+	} else if shareOpts.ShareAccessID != "" {
+		// Backwards compatibility: treat as single-element list
+		return []string{shareOpts.ShareAccessID}
+	}
+	return nil
+}
+
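+// getAccessRightBasedOnShareAdapter picks the access right that the node
+// plugin should use for mounting, based on the share adapter type: a matching
+// cephx access right for CephFS, or nil for NFS, where the export location
+// alone is sufficient for mounting.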
+func getAccessRightBasedOnShareAdapter(shareAdapter shareadapters.ShareAdapter, accessRights []shares.AccessRight, shareOpts *options.NodeVolumeContext) (accessRight *shares.AccessRight) {
+	switch shareAdapter.(type) {
+	case *shareadapters.Cephfs:
+		shareAccessIDs := getAccessIDs(shareOpts)
+		for _, accessRightID := range shareAccessIDs {
+			for _, accessRight := range accessRights {
+				if accessRight.ID == accessRightID {
+					// TODO: we should add support for getting the node's own IP or Ceph
+					// user to avoid unnecessary access rights processing. All the node
+					// needs is one cephx user/key to mount the share, so we can return
+					// the first access right that matches the share access IDs list.
+					return &accessRight
+				}
+			}
+		}
+		klog.Fatalf("failed to find access rights %s for cephfs share", shareAccessIDs)
+	case *shareadapters.NFS:
+		// For NFS, we don't need to use an access right specifically. The controller is
+		// already making sure the access rules are properly created.
+		return nil
+	default:
+		klog.Fatalf("unknown share adapter type %T", shareAdapter)
+	}
+	return nil
+}
diff --git a/pkg/csi/manila/controllerserver.go b/pkg/csi/manila/controllerserver.go
index 83f336ea6b..5dc8c17e32 100644
--- a/pkg/csi/manila/controllerserver.go
+++ b/pkg/csi/manila/controllerserver.go
@@ -194,18 +194,24 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 
 	ad := getShareAdapter(shareOpts.Protocol)
 
-	accessRight, err := ad.GetOrGrantAccess(ctx, &shareadapters.GrantAccessArgs{Share: share, ManilaClient: manilaClient, Options: shareOpts})
+	accessRights, err := ad.GetOrGrantAccesses(ctx, &shareadapters.GrantAccessArgs{Share: share, ManilaClient: manilaClient, Options: shareOpts})
 	if err != nil {
 		if wait.Interrupted(err) {
-			return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for access rule %s for volume %s to become available", accessRight.ID, share.Name)
+			return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for access rules for volume %s to become available", share.Name)
 		}
 
 		return nil, status.Errorf(codes.Internal, "failed to grant access to volume %s: %v", share.Name, err)
 	}
 
+	var accessRightIDs []string
+	for _, ar := range accessRights {
+		accessRightIDs = append(accessRightIDs, ar.ID)
+	}
+	shareAccessIDs := strings.Join(accessRightIDs, ",")
+
 	volCtx := filterParametersForVolumeContext(params, options.NodeVolumeContextFields())
 	volCtx = util.SetMapIfNotEmpty(volCtx, "shareID", share.ID)
-	volCtx = util.SetMapIfNotEmpty(volCtx, "shareAccessID", accessRight.ID)
+	volCtx = util.SetMapIfNotEmpty(volCtx, "shareAccessIDs", shareAccessIDs)
 	volCtx = util.SetMapIfNotEmpty(volCtx, "groupID", share.ShareGroupID)
 	volCtx = util.SetMapIfNotEmpty(volCtx, "affinity", shareOpts.Affinity)
 	volCtx = util.SetMapIfNotEmpty(volCtx, "antiAffinity", shareOpts.AntiAffinity)
diff --git a/pkg/csi/manila/nodeserver.go b/pkg/csi/manila/nodeserver.go
index 6a2f8fb080..e765126714 100644
--- a/pkg/csi/manila/nodeserver.go
+++ b/pkg/csi/manila/nodeserver.go
@@ -108,18 +108,6 @@ func (ns *nodeServer) buildVolumeContext(ctx context.Context, volID volumeID, sh
 		return nil, nil, status.Errorf(codes.Internal, "failed to list access rights for volume %s: %v", volID, err)
 	}
 
-	for i := range accessRights {
-		if accessRights[i].ID == shareOpts.ShareAccessID {
-			accessRight = &accessRights[i]
-			break
-		}
-	}
-
-	if accessRight == nil {
-		return nil, nil, status.Errorf(codes.InvalidArgument, "cannot find access right %s for volume %s",
-			shareOpts.ShareAccessID, volID)
-	}
-
 	// Retrieve list of all export locations for this share.
 	// Share adapter will try to choose the correct one for mounting.
 
@@ -131,6 +119,7 @@ func (ns *nodeServer) buildVolumeContext(ctx context.Context, volID volumeID, sh
 	// Build volume context for fwd plugin
 
 	sa := getShareAdapter(ns.d.shareProto)
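+	// Pick the access right the node should mount with, based on the share
+	// protocol (nil for NFS, where no specific access right is needed).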
+	accessRight = getAccessRightBasedOnShareAdapter(sa, accessRights, shareOpts)
 
 	opts := &shareadapters.VolumeContextArgs{
 		Locations: availableExportLocations,
 		Share:     share,
diff --git a/pkg/csi/manila/options/shareoptions.go b/pkg/csi/manila/options/shareoptions.go
index fd4dadc149..af6668e9b6 100644
--- a/pkg/csi/manila/options/shareoptions.go
+++ b/pkg/csi/manila/options/shareoptions.go
@@ -41,9 +41,10 @@ type ControllerVolumeContext struct {
 }
 
 type NodeVolumeContext struct {
-	ShareID       string `name:"shareID" value:"optionalIf:shareName=." precludes:"shareName"`
-	ShareName     string `name:"shareName" value:"optionalIf:shareID=." precludes:"shareID"`
-	ShareAccessID string `name:"shareAccessID"`
+	ShareID        string `name:"shareID" value:"optionalIf:shareName=." precludes:"shareName"`
+	ShareName      string `name:"shareName" value:"optionalIf:shareID=." precludes:"shareID"`
+	ShareAccessID  string `name:"shareAccessID" value:"optionalIf:shareAccessIDs=." precludes:"shareAccessIDs"` // Keep this for backwards compatibility
+	ShareAccessIDs string `name:"shareAccessIDs" value:"optionalIf:shareAccessID=." precludes:"shareAccessID"`
 
 	// Adapter options
 
diff --git a/pkg/csi/manila/shareadapters/cephfs.go b/pkg/csi/manila/shareadapters/cephfs.go
index aae65cceb1..4d3dcfe40b 100644
--- a/pkg/csi/manila/shareadapters/cephfs.go
+++ b/pkg/csi/manila/shareadapters/cephfs.go
@@ -34,82 +34,77 @@ type Cephfs struct{}
 
 var _ ShareAdapter = &Cephfs{}
 
-func (Cephfs) GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (accessRight *shares.AccessRight, err error) {
+func (Cephfs) GetOrGrantAccesses(ctx context.Context, args *GrantAccessArgs) ([]shares.AccessRight, error) {
 	// First, check if the access right exists or needs to be created
 
-	var rights []shares.AccessRight
-
-	accessTo := args.Options.CephfsClientID
-	if accessTo == "" {
-		accessTo = args.Share.Name
-	}
-
-	rights, err = args.ManilaClient.GetAccessRights(ctx, args.Share.ID)
+	rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID)
 	if err != nil {
 		if _, ok := err.(gophercloud.ErrResourceNotFound); !ok {
 			return nil, fmt.Errorf("failed to list access rights: %v", err)
 		}
-	} else {
-		// Try to find the access right
+	}
 
-		for _, r := range rights {
-			if r.AccessTo == accessTo && r.AccessType == "cephx" && r.AccessLevel == "rw" {
-				klog.V(4).Infof("cephx access right for share %s already exists", args.Share.Name)
+	accessToList := []string{args.Share.Name}
+	if args.Options.CephfsClientID != "" {
+		accessToList = strings.Split(args.Options.CephfsClientID, ",")
+	}
 
-				accessRight = &r
-				break
-			}
+	// TODO: add support for getting the exact client ID that the node will use.
+	// For now, we use the first client ID in the list; that is sufficient,
+	// since a node only needs a single cephx user/key to mount the share.
+	accessRightClient := accessToList[0]
+	var accessRight *shares.AccessRight
+
+	// Try to find the access right.
+	for _, r := range rights {
+		if r.AccessTo == accessRightClient && r.AccessType == "cephx" && r.AccessLevel == "rw" {
+			klog.V(4).Infof("cephx access right for share %s already exists", args.Share.Name)
+			accessRight = &r
+			break
 		}
 	}
 
+	// Not found, create it
 	if accessRight == nil {
-		// Not found, create it
-
-		accessRight, err = args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{
+		result, err := args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{
 			AccessType:  "cephx",
 			AccessLevel: "rw",
-			AccessTo:    accessTo,
+			AccessTo:    accessRightClient,
 		})
-
 		if err != nil {
-			return
+			return nil, fmt.Errorf("failed to grant access right: %v", err)
 		}
-	}
-
-	if accessRight.AccessKey != "" {
-		// The access right is ready
-		return
-	}
-
-	// Wait till a ceph key is assigned to the access right
-
-	backoff := wait.Backoff{
-		Duration: time.Second * 5,
-		Factor:   1.2,
-		Steps:    10,
-	}
-
-	return accessRight, wait.ExponentialBackoff(backoff, func() (bool, error) {
-		rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID)
-		if err != nil {
-			return false, err
-		}
-
-		var accessRight *shares.AccessRight
-
-		for i := range rights {
-			if rights[i].AccessTo == accessTo {
-				accessRight = &rights[i]
-				break
+		accessRight = result
+		if result.AccessKey == "" {
+			// Wait till a ceph key is assigned to the access right
+			backoff := wait.Backoff{
+				Duration: time.Second * 5,
+				Factor:   1.2,
+				Steps:    10,
+			}
+			waitErr := wait.ExponentialBackoff(backoff, func() (bool, error) {
+				rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID)
+				if err != nil {
+					return false, fmt.Errorf("error getting access rights for share %s: %v", args.Share.ID, err)
+				}
+				if len(rights) == 0 {
+					return false, fmt.Errorf("cannot find the access right we've just created")
+				}
+				for _, r := range rights {
+					if r.AccessTo == accessRightClient && r.AccessKey != "" {
+						accessRight = &r
+						return true, nil
+					}
+				}
+				klog.V(4).Infof("Access key for %s is not set yet, retrying...", accessRightClient)
+				return false, nil
+			})
+			if waitErr != nil {
+				return nil, fmt.Errorf("timed out while attempting to get access rights for share %s: %v", args.Share.ID, waitErr)
+			}
 		}
+	}
+	return []shares.AccessRight{*accessRight}, nil
 
-		if accessRight == nil {
-			return false, fmt.Errorf("cannot find the access right we've just created")
-		}
-
-		return accessRight.AccessKey != "", nil
-	})
 }
 
 func (Cephfs) BuildVolumeContext(args *VolumeContextArgs) (volumeContext map[string]string, err error) {
diff --git a/pkg/csi/manila/shareadapters/nfs.go b/pkg/csi/manila/shareadapters/nfs.go
index 36591e54d1..c74672e77e 100644
--- a/pkg/csi/manila/shareadapters/nfs.go
+++ b/pkg/csi/manila/shareadapters/nfs.go
@@ -33,7 +33,7 @@ type NFS struct{}
 
 var _ ShareAdapter = &NFS{}
 
-func (NFS) GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (*shares.AccessRight, error) {
+func (NFS) GetOrGrantAccesses(ctx context.Context, args *GrantAccessArgs) ([]shares.AccessRight, error) {
 	// First, check if the access right exists or needs to be created
 
 	rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID)
@@ -43,22 +43,33 @@ func (NFS) GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (*shares
 		}
 	}
 
-	// Try to find the access right
-
-	for _, r := range rights {
-		if r.AccessTo == args.Options.NFSShareClient && r.AccessType == "ip" && r.AccessLevel == "rw" {
-			klog.V(4).Infof("IP access right for share %s already exists", args.Share.Name)
-			return &r, nil
+	accessToList := strings.Split(args.Options.NFSShareClient, ",")
+
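+	// Make sure an rw IP access rule exists for every client in the list,
+	// granting any that are missing.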
+	for _, at := range accessToList {
+		// Try to find the access right
+		found := false
+		for _, r := range rights {
+			if r.AccessTo == at && r.AccessType == "ip" && r.AccessLevel == "rw" {
+				klog.V(4).Infof("IP access right %s for share %s already exists", at, args.Share.Name)
+				found = true
+				break
+			}
+		}
+		// Not found, create it
+		if !found {
+			right, err := args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{
+				AccessType:  "ip",
+				AccessLevel: "rw",
+				AccessTo:    at,
+			})
+			if err != nil {
+				return nil, fmt.Errorf("failed to grant access right: %v", err)
+			}
+			rights = append(rights, *right)
+		}
 	}
 
-	// Not found, create it
-
-	return args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{
-		AccessType:  "ip",
-		AccessLevel: "rw",
-		AccessTo:    args.Options.NFSShareClient,
-	})
+	return rights, nil
 }
 
 func (NFS) BuildVolumeContext(args *VolumeContextArgs) (volumeContext map[string]string, err error) {
diff --git a/pkg/csi/manila/shareadapters/shareadapter.go b/pkg/csi/manila/shareadapters/shareadapter.go
index e84f1bdfff..38eec633a8 100644
--- a/pkg/csi/manila/shareadapters/shareadapter.go
+++ b/pkg/csi/manila/shareadapters/shareadapter.go
@@ -46,10 +46,11 @@ type SecretArgs struct {
 }
 
 type ShareAdapter interface {
-	// GetOrGrantAccess first tries to retrieve an access right for args.Share.
-	// An access right is created for the share in case it doesn't exist yet.
+	// GetOrGrantAccesses first tries to retrieve the list of access rights for args.Share.
+	// It iterates over the list of clients that should have access to the share, as given by nfs-shareClient or cephfs-clientID.
+	// An access right is created for the share in case it doesn't exist yet.
 	// Returns an existing or new access right for args.Share.
-	GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (accessRight *shares.AccessRight, err error)
+	GetOrGrantAccesses(ctx context.Context, args *GrantAccessArgs) (accessRights []shares.AccessRight, err error)
 
 	// BuildVolumeContext builds a volume context map that's passed to NodeStageVolumeRequest and NodePublishVolumeRequest
 	BuildVolumeContext(args *VolumeContextArgs) (volumeContext map[string]string, err error)
diff --git a/tests/e2e/csi/manila/testdriver.go b/tests/e2e/csi/manila/testdriver.go
index 1b29e9bd3f..46ca175c67 100644
--- a/tests/e2e/csi/manila/testdriver.go
+++ b/tests/e2e/csi/manila/testdriver.go
@@ -197,8 +197,8 @@ func (d *manilaTestDriver) GetPersistentVolumeSource(readOnly bool, fsType strin
 			ReadOnly: readOnly,
 			FSType:   fsType,
 			VolumeAttributes: map[string]string{
-				"shareID":       v.shareID,
-				"shareAccessID": v.accessID,
+				"shareID":        v.shareID,
+				"shareAccessIDs": v.accessID,
 			},
 			NodeStageSecretRef: &v1.SecretReference{
 				Name:      manilaSecretName,

From bc819163c428302cf4bb67817c19eaa1bc14a157 Mon Sep 17 00:00:00 2001
From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com>
Date: Wed, 21 Jan 2026 05:21:30 -0800
Subject: [PATCH 2/4] Allow manila e2e testing with DHSS=true (#3050)

When using Manila with DHSS=true, the share network needs to be specified
to allow using/creating the share.

To allow e2e testing in a DHSS=true environment, this adds an option to
specify the share network via an environment variable, keeping backward
compatibility while allowing testing with DHSS=true.
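
For example (the share network UUID below is a placeholder, and the exact
e2e entrypoint depends on the local setup):

    export MANILA_SHARE_NETWORK_ID=<share-network-uuid>
    # then run the manila CSI e2e suite as usual; leaving the variable
    # unset keeps the previous DHSS=false behavior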
Co-authored-by: eshulman2
---
 tests/e2e/csi/manila/manilavolume.go | 19 +++++++++++++++----
 tests/e2e/csi/manila/testdriver.go   | 12 ++++++++++++
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/tests/e2e/csi/manila/manilavolume.go b/tests/e2e/csi/manila/manilavolume.go
index a322236565..fa13381fcc 100644
--- a/tests/e2e/csi/manila/manilavolume.go
+++ b/tests/e2e/csi/manila/manilavolume.go
@@ -3,6 +3,7 @@ package test
 import (
 	"bytes"
 	"context"
+	"os"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -13,6 +14,10 @@ import (
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 )
 
+// Environment variable for DHSS=True mode share network.
+// This must match the variable in testdriver.go.
+var manilaShareNetworkIDForVolume = os.Getenv("MANILA_SHARE_NETWORK_ID")
+
 func runCmd(name string, args ...string) ([]byte, error) {
 	var stdout, stderr bytes.Buffer
 	cmd := exec.Command(name, args...)
@@ -47,9 +52,8 @@ func manilaCreateVolume(
 	ginkgo.By("Creating a test Manila volume externally")
 
 	// Create share.
-
-	out, err := runCmd(
-		"openstack",
+	// Build command arguments, optionally including share network for DHSS=True mode.
+	args := []string{
 		"share",
 		"create",
 		shareProto,
@@ -58,7 +62,14 @@ func manilaCreateVolume(
 		"--format=value",
 		"--column=id",
 		"--wait",
-	)
+	}
+
+	// Support for DHSS=True mode: include share network ID if specified
+	if manilaShareNetworkIDForVolume != "" {
+		args = append(args, "--share-network="+manilaShareNetworkIDForVolume)
+	}
+
+	out, err := runCmd("openstack", args...)
 
 	shareID := strings.TrimSpace(string(out))
 
diff --git a/tests/e2e/csi/manila/testdriver.go b/tests/e2e/csi/manila/testdriver.go
index 46ca175c67..b152065f6f 100644
--- a/tests/e2e/csi/manila/testdriver.go
+++ b/tests/e2e/csi/manila/testdriver.go
@@ -3,6 +3,7 @@ package test
 import (
 	"context"
 	"fmt"
+	"os"
 
 	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
@@ -30,6 +31,12 @@ const (
 	manilaShareSizeGiB = 1
 )
 
+// Environment variable for DHSS=True (driver_handles_share_servers) mode.
+// Set MANILA_SHARE_NETWORK_ID to enable testing with share networks.
+var (
+	manilaShareNetworkID = os.Getenv("MANILA_SHARE_NETWORK_ID")
+)
+
 type manilaTestDriver struct {
 	driverInfo       storageframework.DriverInfo
 	volumeAttributes []map[string]string
@@ -129,6 +136,11 @@ func (d *manilaTestDriver) GetDynamicProvisionStorageClass(ctx context.Context,
 		"csi.storage.k8s.io/node-publish-secret-namespace": manilaSecretNamespace,
 	}
 
+	// Support for DHSS=True mode: include share network ID if specified
+	if manilaShareNetworkID != "" {
+		parameters["shareNetworkID"] = manilaShareNetworkID
+	}
+
 	sc := storageframework.GetStorageClass(
 		d.driverInfo.Name,
 		parameters,

From 8595e58091fd21455ebd52cf766a33b5f38b70c3 Mon Sep 17 00:00:00 2001
From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com>
Date: Thu, 22 Jan 2026 12:07:31 -0800
Subject: [PATCH 3/4] [release-1.34] tests: bump devstack to stable/2025.2
 (#3055)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* tests: bump devstack to stable/2025.2

* tests: show loadbalancer tags in raw format

---------

Co-authored-by: pýrus
---
 tests/e2e/cloudprovider/test-lb-service.sh    | 20 +++++++++----------
 .../roles/install-devstack/defaults/main.yaml |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/tests/e2e/cloudprovider/test-lb-service.sh b/tests/e2e/cloudprovider/test-lb-service.sh
index cb57d6ec8c..55617f773f 100755
--- a/tests/e2e/cloudprovider/test-lb-service.sh
+++ b/tests/e2e/cloudprovider/test-lb-service.sh
@@ -57,7 +57,7 @@ function _check_lb_tags {
     local tags=$3
 
     if [ -z "$tags" ]; then
-        tags=$(openstack loadbalancer show $lbID -f value -c tags)
+        tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
         tags=$(echo $tags)
     fi
     if [[ ! "$tags" =~ (^|[[:space:]])kube_service_(.+?)$svcName($|[[:space:]]) ]]; then
@@ -468,7 +468,7 @@ EOF
     lbID=$(_check_service_lb_annotation "${service1}")
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
    tags=$(echo $tags)
    _check_lb_tags $lbID $service1 "$tags"
    if [ $? -ne 0 ]; then
@@ -509,7 +509,7 @@ EOF
     fi
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -ne 0 ]; then
@@ -560,7 +560,7 @@ EOF
     wait_for_loadbalancer $lbID
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -ne 0 ]; then
@@ -605,7 +605,7 @@ EOF
     wait_for_service_address ${service3}
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service3 "$tags"
     if [ $? -ne 0 ]; then
@@ -637,7 +637,7 @@ EOF
     sleep 10
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -ne 0 ]; then
@@ -660,7 +660,7 @@ EOF
     sleep 5
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -ne 0 ]; then
@@ -680,7 +680,7 @@ EOF
     wait_for_loadbalancer $lbID
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -eq 0 ]; then
@@ -778,7 +778,7 @@ EOF
     lbID=$(_check_service_lb_annotation "${service1}")
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -ne 0 ]; then
@@ -792,7 +792,7 @@ EOF
     wait_for_service_deleted ${service1}
 
     printf "\n>>>>>>> Validating tags of openstack load balancer %s \n" "$lbID"
-    tags=$(openstack loadbalancer show $lbID -f value -c tags)
+    tags=$(openstack loadbalancer show $lbID -f json -c tags | jq -r '.tags[]')
     tags=$(echo $tags)
     _check_lb_tags $lbID $service1 "$tags"
     if [ $? -eq 0 ]; then
diff --git a/tests/playbooks/roles/install-devstack/defaults/main.yaml b/tests/playbooks/roles/install-devstack/defaults/main.yaml
index c73699bbd5..7b2c47404f 100644
--- a/tests/playbooks/roles/install-devstack/defaults/main.yaml
+++ b/tests/playbooks/roles/install-devstack/defaults/main.yaml
@@ -1,7 +1,7 @@
 ---
 user: "stack"
 workdir: "/home/{{ user }}/devstack"
-branch: "stable/2025.1"
+branch: "stable/2025.2"
 enable_services:
   - nova
   - glance

From 12f841ebf98b9966251d0ddcfc0a56606c3f68a9 Mon Sep 17 00:00:00 2001
From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com>
Date: Fri, 23 Jan 2026 10:03:35 -0800
Subject: [PATCH 4/4] tests: split kubectl download task into multiple with
 retries (#3060)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: pýrus
---
 .../roles/install-k3s/tasks/main.yaml         | 29 +++++++++++++++++--
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/tests/playbooks/roles/install-k3s/tasks/main.yaml b/tests/playbooks/roles/install-k3s/tasks/main.yaml
index 75863d85eb..26530979d4 100644
--- a/tests/playbooks/roles/install-k3s/tasks/main.yaml
+++ b/tests/playbooks/roles/install-k3s/tasks/main.yaml
@@ -154,7 +154,7 @@
     retries: 100
     delay: 5
 
-- name: Prepare kubectl and kubeconfig file
+- name: Prepare kubeconfig file
   shell:
     executable: /bin/bash
     cmd: |
      set -ex
      mkdir -p {{ ansible_user_dir }}/.kube
      scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ ansible_user_dir }}/.ssh/id_rsa ubuntu@{{ k3s_fip }}:/etc/rancher/k3s/k3s.yaml {{ ansible_user_dir }}/.kube/config
-      curl -sLO# https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl
-      chmod +x ./kubectl; sudo mv ./kubectl /usr/local/bin/kubectl
+
+- name: Get latest kubectl version
+  uri:
+    url: https://dl.k8s.io/release/stable.txt
+    return_content: yes
+  register: kubectl_version
+  retries: 5
+  delay: 10
+  until: kubectl_version.status == 200
+
+- name: Download kubectl binary
+  get_url:
+    url: "https://dl.k8s.io/release/{{ kubectl_version.content | trim }}/bin/linux/amd64/kubectl"
+    dest: /usr/local/bin/kubectl
+    mode: '0755'
+  become: true
+  retries: 5
+  delay: 10
+
+- name: Set kubectl cluster config
+  shell:
+    executable: /bin/bash
+    cmd: |
+      set -ex
+      kubectl config set-cluster default --server=https://{{ k3s_fip }}:6443 --kubeconfig {{ ansible_user_dir }}/.kube/config
 
 - name: Wait for k8s node ready