Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion api/v1alpha3/zz_generated.conversion.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion api/v1alpha4/zz_generated.conversion.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion api/v1alpha5/zz_generated.conversion.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions api/v1alpha6/conditions_consts.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,3 +51,13 @@ const (
// FloatingIPErrorReason used when the floating ip could not be created or attached.
FloatingIPErrorReason = "FloatingIPError"
)

const (
	// ClusterReadyCondition reports on the readiness of the OpenStack cluster.
	ClusterReadyCondition clusterv1.ConditionType = "ClusterReady"
	// ClusterNotReadyReason used when creating the cluster failed.
	ClusterNotReadyReason = "ClusterNotReady"
	// ClusterReadyReason reports on current status of the OpenStack cluster. Ready indicates the cluster is in an OK state.
	// NOTE(review): this shares the string value "ClusterReady" with ClusterReadyCondition above — confirm both are intended.
	ClusterReadyReason = "ClusterReady"
	// LoadBalancerReconcileErrorReason used when OpenStack load balancer creation/reconciliation fails.
	LoadBalancerReconcileErrorReason = "LoadBalancerReconcileError"
)
14 changes: 13 additions & 1 deletion api/v1alpha6/openstackcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ type OpenStackClusterSpec struct {
APIServerLoadBalancer APIServerLoadBalancer `json:"apiServerLoadBalancer,omitempty"`

// DisableFloatingIP determines whether or not to attempt to attach a floating
// IP to the Instance.
// IP to the Instance.
DisableFloatingIP bool `json:"disableFloatingIP"`

// DisableAPIServerFloatingIP determines whether or not to attempt to attach a floating
Expand Down Expand Up @@ -214,6 +214,8 @@ type OpenStackClusterStatus struct {
// and/or logged in the controller's output.
// +optional
FailureMessage *string `json:"failureMessage,omitempty"`

Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down Expand Up @@ -249,3 +251,13 @@ type OpenStackClusterList struct {
func init() {
SchemeBuilder.Register(&OpenStackCluster{}, &OpenStackClusterList{})
}

// GetConditions returns the observations of the operational state of the OpenStackCluster resource.
func (r *OpenStackCluster) GetConditions() clusterv1.Conditions {
	return r.Status.Conditions
}

// SetConditions sets the underlying service state of the OpenStackCluster to the provided clusterv1.Conditions.
func (r *OpenStackCluster) SetConditions(conditions clusterv1.Conditions) {
	r.Status.Conditions = conditions
}
2 changes: 1 addition & 1 deletion api/v1alpha6/openstackmachine_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ type OpenStackMachineSpec struct {

// The volume metadata to boot from
RootVolume *RootVolume `json:"rootVolume,omitempty"`

// The custom volume metadata to boot from
CustomeVolumes []*RootVolume `json:"customeVolumes,omitempty"`

Expand Down
5 changes: 2 additions & 3 deletions api/v1alpha6/openstackmachine_webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,15 +88,14 @@ func (r *OpenStackMachine) ValidateUpdate(old runtime.Object) error {
newOpenStackMachineSpec := newOpenStackMachine["spec"].(map[string]interface{})
oldOpenStackMachineSpec := oldOpenStackMachine["spec"].(map[string]interface{})


// allow changes to providerID always
if oldOpenStackMachineSpec["providerID"] != nil || newOpenStackMachineSpec["providerID"] != nil {
if oldOpenStackMachineSpec["providerID"] != nil || newOpenStackMachineSpec["providerID"] != nil {
delete(oldOpenStackMachineSpec, "providerID")
delete(newOpenStackMachineSpec, "providerID")
}

// allow changes to instanceID always
if oldOpenStackMachineSpec["instanceID"] != nil || newOpenStackMachineSpec["instanceID"] != nil {
if oldOpenStackMachineSpec["instanceID"] != nil || newOpenStackMachineSpec["instanceID"] != nil {
delete(oldOpenStackMachineSpec, "instanceID")
delete(newOpenStackMachineSpec, "instanceID")
}
Expand Down
18 changes: 18 additions & 0 deletions api/v1alpha6/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -5308,6 +5308,52 @@ spec:
- name
- rules
type: object
conditions:
description: Conditions provide observations of the operational state
of a Cluster API resource.
items:
description: Condition defines an observation of a Cluster API resource
operational state.
properties:
lastTransitionTime:
description: Last time the condition transitioned from one status
to another. This should be when the underlying condition changed.
If that is not known, then using the time when the API field
changed is acceptable.
format: date-time
type: string
message:
description: A human readable message indicating details about
the transition. This field may be empty.
type: string
reason:
description: The reason for the condition's last transition
in CamelCase. The specific API may choose whether or not this
field is considered a guaranteed API. This field may not be
empty.
type: string
severity:
description: Severity provides an explicit classification of
Reason code, so the users or machines can immediately understand
the current situation and act accordingly. The Severity field
MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
description: Type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
controlPlaneSecurityGroup:
description: 'ControlPlaneSecurityGroups contains all the information
about the OpenStack Security Group that needs to be applied to control
Expand Down
62 changes: 46 additions & 16 deletions controllers/openstackcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,11 @@ import (
"context"
"fmt"
"reflect"
"strings"

kerrors "k8s.io/apimachinery/pkg/util/errors"

"sigs.k8s.io/cluster-api/util/conditions"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -104,10 +109,8 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req

// Always patch the openStackCluster when exiting this function so we can persist any OpenStackCluster changes.
defer func() {
if err := patchHelper.Patch(ctx, openStackCluster); err != nil {
if reterr == nil {
reterr = errors.Wrapf(err, "error patching OpenStackCluster %s/%s", openStackCluster.Namespace, openStackCluster.Name)
}
if err := patchCluster(ctx, patchHelper, openStackCluster); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}()

Expand All @@ -132,6 +135,14 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
return reconcileNormal(ctx, scope, patchHelper, cluster, openStackCluster)
}

// patchCluster persists any changes made to the OpenStackCluster object
// (spec, status, and conditions) using the supplied patch helper.
// Extra patch.Options (e.g. owned-condition lists) may be passed through.
func patchCluster(ctx context.Context, patchHelper *patch.Helper, openStackCluster *infrav1.OpenStackCluster, options ...patch.Option) error {
	// Return the Patch result directly; wrapping it in an explicit
	// err != nil check added no information.
	return patchHelper.Patch(ctx, openStackCluster, options...)
}

func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger.Info("Reconciling Cluster delete")

Expand All @@ -146,15 +157,25 @@ func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch

clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)

var skipLBDeleting bool

if openStackCluster.Spec.APIServerLoadBalancer.Enabled {
loadBalancerService, err := loadbalancer.NewService(scope)
if err != nil {
return reconcile.Result{}, err
if strings.EqualFold(err.Error(), loadbalancer.ErrLoadBalancerNoPoint) {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to init delete load balancer client: %w", err))
skipLBDeleting = true
}
if !skipLBDeleting {
return reconcile.Result{}, err
}
}

if err = loadBalancerService.DeleteLoadBalancer(openStackCluster, clusterName); err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete load balancer: %v", err))
return reconcile.Result{}, errors.Errorf("failed to delete load balancer: %v", err)
if !skipLBDeleting {
if err = loadBalancerService.DeleteLoadBalancer(openStackCluster, clusterName); err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete load balancer: %v", err))
return reconcile.Result{}, errors.Errorf("failed to delete load balancer: %v", err)
}
}
}

Expand Down Expand Up @@ -249,6 +270,10 @@ func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackClus
func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger.Info("Reconciling Cluster")

if openStackCluster.Status.FailureReason != nil || openStackCluster.Status.FailureMessage != nil {
scope.Logger.Info("Not reconciling cluster in failed state. See openStackCluster.status.failureReason, openStackCluster.status.failureMessage, or previously logged error for details")
return ctrl.Result{}, nil
}
// If the OpenStackCluster doesn't have our finalizer, add it.
controllerutil.AddFinalizer(openStackCluster, infrav1.ClusterFinalizer)
// Register the finalizer immediately to avoid orphaning OpenStack resources on delete
Expand Down Expand Up @@ -382,14 +407,14 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC
func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string, userData string) *compute.InstanceSpec {
name := fmt.Sprintf("%s-bastion", clusterName)
instanceSpec := &compute.InstanceSpec{
Name: name,
Flavor: openStackCluster.Spec.Bastion.Instance.Flavor,
SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName,
Image: openStackCluster.Spec.Bastion.Instance.Image,
UserData: userData,
ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID,
FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone,
RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume,
Name: name,
Flavor: openStackCluster.Spec.Bastion.Instance.Flavor,
SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName,
Image: openStackCluster.Spec.Bastion.Instance.Image,
UserData: userData,
ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID,
FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone,
RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume,
DeleteVolumeOnTermination: openStackCluster.Spec.Bastion.Instance.DeleteVolumeOnTermination,
}

Expand Down Expand Up @@ -512,6 +537,10 @@ func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster,
if openStackCluster.Spec.APIServerLoadBalancer.Enabled {
loadBalancerService, err := loadbalancer.NewService(scope)
if err != nil {
if strings.EqualFold(err.Error(), loadbalancer.ErrLoadBalancerNoPoint) {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to init load balancer client: %w", err))
conditions.MarkFalse(openStackCluster, infrav1.ClusterReadyReason, infrav1.LoadBalancerReconcileErrorReason, clusterv1.ConditionSeverityError, err.Error())
}
return err
}

Expand Down Expand Up @@ -616,4 +645,5 @@ func handleUpdateOSCError(openstackCluster *infrav1.OpenStackCluster, message er
err := capierrors.UpdateClusterError
openstackCluster.Status.FailureReason = &err
openstackCluster.Status.FailureMessage = pointer.StringPtr(message.Error())
openstackCluster.Status.Ready = false
}
Loading