From 737e20599eb79289f06b02dfd0b7fe388c2a657d Mon Sep 17 00:00:00 2001 From: Damiano Donati Date: Fri, 19 Sep 2025 12:46:13 +0200 Subject: [PATCH] machine status conversion and syncing --- .../machineset_sync_controller.go | 2 +- .../machinesync/machine_sync_controller.go | 527 +++++++++++++----- .../machine_sync_controller_test.go | 24 +- pkg/conversion/capi2mapi/machine.go | 117 ++++ pkg/conversion/capi2mapi/machine_test.go | 74 ++- pkg/conversion/capi2mapi/machineset.go | 12 +- pkg/conversion/mapi2capi/machine.go | 368 ++++++++++++ pkg/conversion/mapi2capi/machine_test.go | 425 ++++++++++++++ pkg/conversion/test/fuzz/fuzz.go | 219 +++++++- pkg/util/conditions.go | 13 +- pkg/util/sync.go | 174 +++++- 11 files changed, 1779 insertions(+), 176 deletions(-) diff --git a/pkg/controllers/machinesetsync/machineset_sync_controller.go b/pkg/controllers/machinesetsync/machineset_sync_controller.go index 4862cb9bd..21e025bb4 100644 --- a/pkg/controllers/machinesetsync/machineset_sync_controller.go +++ b/pkg/controllers/machinesetsync/machineset_sync_controller.go @@ -1072,7 +1072,7 @@ func setChangedMAPIMachineSetStatusFields(existingMAPIMachineSet, convertedMAPIM existingMAPIMachineSet.Status.ErrorMessage = convertedMAPIMachineSet.Status.ErrorMessage for i := range convertedMAPIMachineSet.Status.Conditions { - existingMAPIMachineSet.Status.Conditions = util.SetMAPIMachineSetCondition(existingMAPIMachineSet.Status.Conditions, &convertedMAPIMachineSet.Status.Conditions[i]) + existingMAPIMachineSet.Status.Conditions = util.SetMAPICondition(existingMAPIMachineSet.Status.Conditions, &convertedMAPIMachineSet.Status.Conditions[i]) } } diff --git a/pkg/controllers/machinesync/machine_sync_controller.go b/pkg/controllers/machinesync/machine_sync_controller.go index 4af1856cc..c507d4cee 100644 --- a/pkg/controllers/machinesync/machine_sync_controller.go +++ b/pkg/controllers/machinesync/machine_sync_controller.go @@ -51,6 +51,8 @@ import ( openstackv1 
"sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" + conditionsv1beta2 "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/labels/format" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -265,56 +267,56 @@ func (r *MachineSyncReconciler) Reconcile(ctx context.Context, req reconcile.Req // reconcileCAPIMachinetoMAPIMachine reconciles a CAPI Machine to a MAPI Machine. // //nolint:gocognit,funlen, cyclop -func (r *MachineSyncReconciler) reconcileCAPIMachinetoMAPIMachine(ctx context.Context, capiMachine *clusterv1.Machine, mapiMachine *mapiv1beta1.Machine) (ctrl.Result, error) { +func (r *MachineSyncReconciler) reconcileCAPIMachinetoMAPIMachine(ctx context.Context, sourceCAPIMachine *clusterv1.Machine, existingMAPIMachine *mapiv1beta1.Machine) (ctrl.Result, error) { logger := logf.FromContext(ctx) - if capiMachine == nil { - logger.Error(errCAPIMachineNotFound, "machine", mapiMachine.Name) + if sourceCAPIMachine == nil { + logger.Error(errCAPIMachineNotFound, "machine", existingMAPIMachine.Name) if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonCAPIMachineNotFound, errCAPIMachineNotFound.Error(), nil); condErr != nil { + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonCAPIMachineNotFound, errCAPIMachineNotFound.Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{errCAPIMachineNotFound, condErr}) } return ctrl.Result{}, errCAPIMachineNotFound } - infraCluster, infraMachine, err := r.fetchCAPIInfraResources(ctx, capiMachine) + infraCluster, infraMachine, err := r.fetchCAPIInfraResources(ctx, sourceCAPIMachine) if err != nil { fetchErr := fmt.Errorf("failed to fetch Cluster API infra resources: %w", err) - if mapiMachine == nil { - r.Recorder.Event(capiMachine, 
corev1.EventTypeWarning, "SynchronizationWarning", fetchErr.Error()) + if existingMAPIMachine == nil { + r.Recorder.Event(sourceCAPIMachine, corev1.EventTypeWarning, "SynchronizationWarning", fetchErr.Error()) return ctrl.Result{}, fetchErr } if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToGetCAPIInfraResources, fetchErr.Error(), nil); condErr != nil { + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToGetCAPIInfraResources, fetchErr.Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{fetchErr, condErr}) } return ctrl.Result{}, fetchErr } - if shouldRequeue, err := r.reconcileCAPItoMAPIMachineDeletion(ctx, capiMachine, infraMachine, mapiMachine); err != nil { + if shouldRequeue, err := r.reconcileCAPItoMAPIMachineDeletion(ctx, sourceCAPIMachine, infraMachine, existingMAPIMachine); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile Cluster API to Machine API machine deletion: %w", err) } else if shouldRequeue { return ctrl.Result{}, nil } - if shouldRequeue, err := r.ensureSyncFinalizer(ctx, mapiMachine, capiMachine, infraMachine); err != nil { + if shouldRequeue, err := r.ensureSyncFinalizer(ctx, existingMAPIMachine, sourceCAPIMachine, infraMachine); err != nil { return ctrl.Result{}, fmt.Errorf("failed to ensure sync finalizer: %w", err) } else if shouldRequeue { return ctrl.Result{}, nil } - newMAPIOwnerReferences, err := r.convertCAPIMachineOwnerReferencesToMAPI(ctx, capiMachine) + newMAPIOwnerReferences, err := r.convertCAPIMachineOwnerReferencesToMAPI(ctx, sourceCAPIMachine) //nolint:nestif if err != nil { var fe *field.Error if errors.As(err, &fe) { - if mapiMachine != nil { - if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, fe.Detail, nil); condErr != nil { + if existingMAPIMachine != nil { + if condErr := r.applySynchronizedConditionWithPatch(ctx, 
existingMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, fe.Detail, nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{err, condErr}) } } @@ -327,17 +329,17 @@ func (r *MachineSyncReconciler) reconcileCAPIMachinetoMAPIMachine(ctx context.Co return ctrl.Result{}, fmt.Errorf("failed to convert Cluster API machine owner references to Machine API: %w", err) } - newMapiMachine, warns, err := r.convertCAPIToMAPIMachine(capiMachine, infraMachine, infraCluster) + convertedMAPIMachine, warns, err := r.convertCAPIToMAPIMachine(sourceCAPIMachine, infraMachine, infraCluster) if err != nil { conversionErr := fmt.Errorf("failed to convert Cluster API machine to Machine API machine: %w", err) - if mapiMachine == nil { - r.Recorder.Event(capiMachine, corev1.EventTypeWarning, "SynchronizationWarning", conversionErr.Error()) + if existingMAPIMachine == nil { + r.Recorder.Event(sourceCAPIMachine, corev1.EventTypeWarning, "SynchronizationWarning", conversionErr.Error()) return ctrl.Result{}, conversionErr } if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, conversionErr.Error(), nil); condErr != nil { + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, conversionErr.Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{conversionErr, condErr}) } @@ -346,42 +348,42 @@ func (r *MachineSyncReconciler) reconcileCAPIMachinetoMAPIMachine(ctx context.Co for _, warning := range warns { logger.Info("Warning during conversion", "warning", warning) - r.Recorder.Event(mapiMachine, corev1.EventTypeWarning, "ConversionWarning", warning) + r.Recorder.Event(existingMAPIMachine, corev1.EventTypeWarning, "ConversionWarning", warning) } - newMapiMachine.SetNamespace(r.MAPINamespace) - newMapiMachine.SetOwnerReferences(newMAPIOwnerReferences) + convertedMAPIMachine.SetNamespace(r.MAPINamespace) + 
convertedMAPIMachine.SetOwnerReferences(newMAPIOwnerReferences) - if mapiMachine != nil { - newMapiMachine.SetResourceVersion(util.GetResourceVersion(mapiMachine)) + if existingMAPIMachine != nil { + convertedMAPIMachine.SetResourceVersion(util.GetResourceVersion(existingMAPIMachine)) // Restore authoritativeness to the current one. - newMapiMachine.Spec.AuthoritativeAPI = mapiMachine.Spec.AuthoritativeAPI + convertedMAPIMachine.Spec.AuthoritativeAPI = existingMAPIMachine.Spec.AuthoritativeAPI // Restore finalizers to the current one. - newMapiMachine.ObjectMeta.Finalizers = mapiMachine.Finalizers + convertedMAPIMachine.ObjectMeta.Finalizers = existingMAPIMachine.Finalizers } else { // If there is no existing MAPI machine it means we are creating a MAPI machine // from scratch from CAPI one, hence set the authoritativeness for it to Cluster API. - newMapiMachine.Spec.AuthoritativeAPI = mapiv1beta1.MachineAuthorityClusterAPI + convertedMAPIMachine.Spec.AuthoritativeAPI = mapiv1beta1.MachineAuthorityClusterAPI } - if result, err := r.createOrUpdateMAPIMachine(ctx, mapiMachine, newMapiMachine); err != nil { + if result, err := r.createOrUpdateMAPIMachine(ctx, existingMAPIMachine, convertedMAPIMachine); err != nil { createUpdateErr := fmt.Errorf("unable to ensure Machine API machine: %w", err) - if mapiMachine == nil { - r.Recorder.Event(capiMachine, corev1.EventTypeWarning, "SynchronizationWarning", createUpdateErr.Error()) + if existingMAPIMachine == nil { + r.Recorder.Event(sourceCAPIMachine, corev1.EventTypeWarning, "SynchronizationWarning", createUpdateErr.Error()) return ctrl.Result{}, createUpdateErr } if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, createUpdateErr.Error(), nil); condErr != nil { + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertCAPIMachineToMAPI, createUpdateErr.Error(), nil); condErr != nil { return ctrl.Result{}, 
utilerrors.NewAggregate([]error{createUpdateErr, condErr}) } return result, createUpdateErr } - return ctrl.Result{}, r.applySynchronizedConditionWithPatch(ctx, newMapiMachine, corev1.ConditionTrue, - controllers.ReasonResourceSynchronized, messageSuccessfullySynchronizedCAPItoMAPI, &capiMachine.Generation) + return ctrl.Result{}, r.applySynchronizedConditionWithPatch(ctx, convertedMAPIMachine, corev1.ConditionTrue, + controllers.ReasonResourceSynchronized, messageSuccessfullySynchronizedCAPItoMAPI, &sourceCAPIMachine.Generation) } // reconcileMAPIMachinetoCAPIMachine a MAPI Machine to a CAPI Machine. @@ -390,45 +392,45 @@ func (r *MachineSyncReconciler) reconcileCAPIMachinetoMAPIMachine(ctx context.Co // enforces this. // //nolint:funlen, cyclop, gocognit -func (r *MachineSyncReconciler) reconcileMAPIMachinetoCAPIMachine(ctx context.Context, mapiMachine *mapiv1beta1.Machine, capiMachine *clusterv1.Machine) (ctrl.Result, error) { +func (r *MachineSyncReconciler) reconcileMAPIMachinetoCAPIMachine(ctx context.Context, sourceMAPIMachine *mapiv1beta1.Machine, existingCAPIMachine *clusterv1.Machine) (ctrl.Result, error) { logger := logf.FromContext(ctx) - authoritativeAPI := mapiMachine.Status.AuthoritativeAPI + authoritativeAPI := sourceMAPIMachine.Status.AuthoritativeAPI if authoritativeAPI == mapiv1beta1.MachineAuthorityClusterAPI { logger.Info("AuthoritativeAPI is set to Cluster API, but no Cluster API machine exists. 
Running an initial Machine API to Cluster API sync") } - _, infraMachine, err := r.fetchCAPIInfraResources(ctx, capiMachine) + _, existingInfraMachine, err := r.fetchCAPIInfraResources(ctx, existingCAPIMachine) if err != nil { fetchErr := fmt.Errorf("failed to fetch Cluster API infra resources: %w", err) if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToGetCAPIInfraResources, fetchErr.Error(), nil); condErr != nil { + ctx, sourceMAPIMachine, corev1.ConditionFalse, reasonFailedToGetCAPIInfraResources, fetchErr.Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{fetchErr, condErr}) } return ctrl.Result{}, fetchErr } - if shouldRequeue, err := r.reconcileMAPItoCAPIMachineDeletion(ctx, mapiMachine, capiMachine, infraMachine); err != nil { + if shouldRequeue, err := r.reconcileMAPItoCAPIMachineDeletion(ctx, sourceMAPIMachine, existingCAPIMachine, existingInfraMachine); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile Machine API to Cluster API machine deletion: %w", err) } else if shouldRequeue { return ctrl.Result{}, nil } - if shouldRequeue, err := r.ensureSyncFinalizer(ctx, mapiMachine, capiMachine, infraMachine); err != nil { + if shouldRequeue, err := r.ensureSyncFinalizer(ctx, sourceMAPIMachine, existingCAPIMachine, existingInfraMachine); err != nil { return ctrl.Result{}, fmt.Errorf("failed to ensure sync finalizer: %w", err) } else if shouldRequeue { return ctrl.Result{}, nil } - newCAPIOwnerReferences, err := r.convertMAPIMachineOwnerReferencesToCAPI(ctx, mapiMachine) + convertedCAPIOwnerReferences, err := r.convertMAPIMachineOwnerReferencesToCAPI(ctx, sourceMAPIMachine) //nolint:nestif if err != nil { var fe *field.Error if errors.As(err, &fe) { - if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, fe.Detail, nil); condErr != nil { + if condErr := 
r.applySynchronizedConditionWithPatch(ctx, sourceMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, fe.Detail, nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{err, condErr}) } @@ -442,17 +444,17 @@ func (r *MachineSyncReconciler) reconcileMAPIMachinetoCAPIMachine(ctx context.Co return ctrl.Result{}, nil } - if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, fmt.Errorf("failed to convert Machine API machine owner references to Cluster API: %w", err).Error(), nil); condErr != nil { + if condErr := r.applySynchronizedConditionWithPatch(ctx, sourceMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, fmt.Errorf("failed to convert Machine API machine owner references to Cluster API: %w", err).Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{err, condErr}) } return ctrl.Result{}, fmt.Errorf("failed to convert Machine API machine owner references to Cluster API: %w", err) } - newCAPIMachine, newCAPIInfraMachine, warns, err := r.convertMAPIToCAPIMachine(mapiMachine) + convertedCAPIMachine, convertedCAPIInfraMachine, warns, err := r.convertMAPIToCAPIMachine(sourceMAPIMachine) if err != nil { conversionErr := fmt.Errorf("failed to convert Machine API machine to Cluster API machine: %w", err) - if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, conversionErr.Error(), nil); condErr != nil { + if condErr := r.applySynchronizedConditionWithPatch(ctx, sourceMAPIMachine, corev1.ConditionFalse, reasonFailedToConvertMAPIMachineToCAPI, conversionErr.Error(), nil); condErr != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{conversionErr, condErr}) } @@ -461,97 +463,99 @@ func (r *MachineSyncReconciler) reconcileMAPIMachinetoCAPIMachine(ctx context.Co for _, warning := range warns { 
logger.Info("Warning during conversion", "warning", warning) - r.Recorder.Event(mapiMachine, corev1.EventTypeWarning, "ConversionWarning", warning) + r.Recorder.Event(sourceMAPIMachine, corev1.EventTypeWarning, "ConversionWarning", warning) } - if capiMachine != nil { - newCAPIMachine.SetGeneration(capiMachine.GetGeneration()) - newCAPIMachine.SetUID(capiMachine.GetUID()) - newCAPIMachine.SetCreationTimestamp(capiMachine.GetCreationTimestamp()) - newCAPIMachine.SetManagedFields(capiMachine.GetManagedFields()) - newCAPIMachine.SetResourceVersion(util.GetResourceVersion(client.Object(capiMachine))) + if existingCAPIMachine != nil { + convertedCAPIMachine.SetGeneration(existingCAPIMachine.GetGeneration()) + convertedCAPIMachine.SetUID(existingCAPIMachine.GetUID()) + convertedCAPIMachine.SetCreationTimestamp(existingCAPIMachine.GetCreationTimestamp()) + convertedCAPIMachine.SetManagedFields(existingCAPIMachine.GetManagedFields()) + convertedCAPIMachine.SetResourceVersion(util.GetResourceVersion(client.Object(existingCAPIMachine))) // Needed to account for additional labels/annotations that might have been down-propagated in-place // from an authoritative CAPI MachineSet to its existing and non-authoritative child CAPI Machine. // ref: https://github.com/kubernetes-sigs/cluster-api/issues/7731 - newCAPIMachine.Labels = util.MergeMaps(capiMachine.Labels, newCAPIMachine.Labels) - newCAPIMachine.Annotations = util.MergeMaps(capiMachine.Annotations, newCAPIMachine.Annotations) + convertedCAPIMachine.Labels = util.MergeMaps(existingCAPIMachine.Labels, convertedCAPIMachine.Labels) + convertedCAPIMachine.Annotations = util.MergeMaps(existingCAPIMachine.Annotations, convertedCAPIMachine.Annotations) // Restore finalizers. 
- newCAPIMachine.SetFinalizers(capiMachine.GetFinalizers()) + convertedCAPIMachine.SetFinalizers(existingCAPIMachine.GetFinalizers()) } - newCAPIMachine.SetNamespace(r.CAPINamespace) - newCAPIMachine.Spec.InfrastructureRef.Namespace = r.CAPINamespace - newCAPIMachine.OwnerReferences = newCAPIOwnerReferences + convertedCAPIMachine.SetNamespace(r.CAPINamespace) + convertedCAPIMachine.Spec.InfrastructureRef.Namespace = r.CAPINamespace + convertedCAPIMachine.OwnerReferences = convertedCAPIOwnerReferences - if len(newCAPIMachine.OwnerReferences) == 1 && newCAPIMachine.OwnerReferences[0].Kind == machineSetKind { + if len(convertedCAPIMachine.OwnerReferences) == 1 && convertedCAPIMachine.OwnerReferences[0].Kind == machineSetKind { // For CAPI Machine that is owned by a CAPI MachineSet we must set the clusterv1.MachineSetNameLabel // as this is what the CAPI machineset controller sets on the CAPI Machine when it creates it, an it is then later used // by other CAPI tooling for filtering purposes. // This check should be safe as in the above convertMAPIMachineOwnerReferencesToCAPI(), we make sure // there is only one owning MachineSet reference for a machine, if any. - newCAPIMachine.Labels[clusterv1.MachineSetNameLabel] = format.MustFormatValue(newCAPIMachine.OwnerReferences[0].Name) + convertedCAPIMachine.Labels[clusterv1.MachineSetNameLabel] = format.MustFormatValue(convertedCAPIMachine.OwnerReferences[0].Name) } if authoritativeAPI == mapiv1beta1.MachineAuthorityMachineAPI { // Set the paused annotation on the new CAPI Machine, if the authoritativeAPI is Machine API, // as we want the new CAPI Machine to be initially paused when the MAPI Machine is the authoritative one. - // For the other case instead (authoritativeAPI == machinev1beta1.MachineAuthorityClusterAPI), + // For the other case instead (authoritativeAPI == mapiv1beta1.MachineAuthorityClusterAPI), // when the new CAPI Machine that is being created is also expected to be the authority // (i.e. 
in cases where the MAPI Machine is created as .spec.authoritativeAPI: ClusterAPI), we do not want to create it paused. - annotations.AddAnnotations(newCAPIMachine, map[string]string{clusterv1.PausedAnnotation: ""}) + annotations.AddAnnotations(convertedCAPIMachine, map[string]string{clusterv1.PausedAnnotation: ""}) } - if !util.IsNilObject(infraMachine) { - newCAPIInfraMachine.SetGeneration(infraMachine.GetGeneration()) - newCAPIInfraMachine.SetUID(infraMachine.GetUID()) - newCAPIInfraMachine.SetCreationTimestamp(infraMachine.GetCreationTimestamp()) - newCAPIInfraMachine.SetManagedFields(infraMachine.GetManagedFields()) - newCAPIInfraMachine.SetResourceVersion(util.GetResourceVersion(infraMachine)) + if !util.IsNilObject(existingInfraMachine) { + convertedCAPIInfraMachine.SetGeneration(existingInfraMachine.GetGeneration()) + convertedCAPIInfraMachine.SetUID(existingInfraMachine.GetUID()) + convertedCAPIInfraMachine.SetCreationTimestamp(existingInfraMachine.GetCreationTimestamp()) + convertedCAPIInfraMachine.SetManagedFields(existingInfraMachine.GetManagedFields()) + convertedCAPIInfraMachine.SetResourceVersion(util.GetResourceVersion(existingInfraMachine)) // Needed to account for additional labels/annotations that might have been down-propagated in-place // from an authoritative CAPI MachineSet to its existing and non-authoritative child CAPI Machine. // ref: https://github.com/kubernetes-sigs/cluster-api/issues/7731 - newCAPIInfraMachine.SetLabels(util.MergeMaps(infraMachine.GetLabels(), newCAPIInfraMachine.GetLabels())) - newCAPIInfraMachine.SetAnnotations(util.MergeMaps(infraMachine.GetAnnotations(), newCAPIInfraMachine.GetAnnotations())) + convertedCAPIInfraMachine.SetLabels(util.MergeMaps(existingInfraMachine.GetLabels(), convertedCAPIInfraMachine.GetLabels())) + convertedCAPIInfraMachine.SetAnnotations(util.MergeMaps(existingInfraMachine.GetAnnotations(), convertedCAPIInfraMachine.GetAnnotations())) // Restore finalizers. 
- newCAPIInfraMachine.SetFinalizers(infraMachine.GetFinalizers()) + convertedCAPIInfraMachine.SetFinalizers(existingInfraMachine.GetFinalizers()) } - newCAPIInfraMachine.SetNamespace(r.CAPINamespace) + convertedCAPIInfraMachine.SetNamespace(r.CAPINamespace) if authoritativeAPI == mapiv1beta1.MachineAuthorityMachineAPI { // Set the paused annotation on the new CAPI Infra Machine, if the authoritativeAPI is Machine API, // as we want the new CAPI Infra Machine to be initially paused when the MAPI Machine is the authoritative one. - // For the other case instead (authoritativeAPI == machinev1beta1.MachineAuthorityClusterAPI), + // For the other case instead (authoritativeAPI == mapiv1beta1.MachineAuthorityClusterAPI), // when the new CAPI Infra Machine that is being created is also expected to be the authority // (i.e. in cases where the MAPI Machine is created as .spec.authoritativeAPI: ClusterAPI), we do not want to create it paused. - annotations.AddAnnotations(newCAPIInfraMachine, map[string]string{clusterv1.PausedAnnotation: ""}) + annotations.AddAnnotations(convertedCAPIInfraMachine, map[string]string{clusterv1.PausedAnnotation: ""}) } - if result, err := r.createOrUpdateCAPIMachine(ctx, mapiMachine, capiMachine, newCAPIMachine); err != nil { - return result, fmt.Errorf("unable to ensure Cluster API machine: %w", err) + // Create or update the CAPI machine. 
+ existingCAPIMachine, err = r.createOrUpdateCAPIMachine(ctx, sourceMAPIMachine, existingCAPIMachine, convertedCAPIMachine) + if err != nil { + return ctrl.Result{}, fmt.Errorf("unable to ensure Cluster API machine: %w", err) } - newCAPIInfraMachine.SetOwnerReferences([]metav1.OwnerReference{{ + convertedCAPIInfraMachine.SetOwnerReferences([]metav1.OwnerReference{{ APIVersion: clusterv1.GroupVersion.String(), Kind: machineKind, - Name: newCAPIMachine.Name, - UID: newCAPIMachine.UID, + Name: existingCAPIMachine.Name, + UID: existingCAPIMachine.UID, Controller: ptr.To(true), BlockOwnerDeletion: ptr.To(true), }}) - result, syncronizationIsProgressing, err := r.createOrUpdateCAPIInfraMachine(ctx, mapiMachine, infraMachine, newCAPIInfraMachine) + result, syncronizationIsProgressing, err := r.createOrUpdateCAPIInfraMachine(ctx, sourceMAPIMachine, existingInfraMachine, convertedCAPIInfraMachine) if err != nil { return result, fmt.Errorf("unable to ensure Cluster API infra machine: %w", err) } if syncronizationIsProgressing { - return ctrl.Result{RequeueAfter: time.Second * 1}, r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionUnknown, + return ctrl.Result{RequeueAfter: time.Second * 1}, r.applySynchronizedConditionWithPatch(ctx, sourceMAPIMachine, corev1.ConditionUnknown, reasonProgressingToCreateCAPIInfraMachine, progressingToSynchronizeMAPItoCAPI, nil) } - return ctrl.Result{}, r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionTrue, - controllers.ReasonResourceSynchronized, messageSuccessfullySynchronizedMAPItoCAPI, &mapiMachine.Generation) + return ctrl.Result{}, r.applySynchronizedConditionWithPatch(ctx, sourceMAPIMachine, corev1.ConditionTrue, + controllers.ReasonResourceSynchronized, messageSuccessfullySynchronizedMAPItoCAPI, &sourceMAPIMachine.Generation) } // convertMAPIToCAPIMachine converts a MAPI Machine to a CAPI Machine, selecting the correct converter based on the platform. 
@@ -601,8 +605,8 @@ func (r *MachineSyncReconciler) convertCAPIToMAPIMachine(capiMachine *clusterv1. // createOrUpdateCAPIInfraMachine creates a CAPI infra machine from a MAPI machine, or updates if it exists and it is out of date. // -//nolint:funlen -func (r *MachineSyncReconciler) createOrUpdateCAPIInfraMachine(ctx context.Context, mapiMachine *mapiv1beta1.Machine, infraMachine client.Object, newCAPIInfraMachine client.Object) (ctrl.Result, bool, error) { //nolint:unparam +//nolint:funlen,unparam +func (r *MachineSyncReconciler) createOrUpdateCAPIInfraMachine(ctx context.Context, mapiMachine *mapiv1beta1.Machine, infraMachine client.Object, newCAPIInfraMachine client.Object) (ctrl.Result, bool, error) { logger := logf.FromContext(ctx) // This variable tracks whether or not we are still progressing // towards syncronizing the MAPI machine with the CAPI infra machine. @@ -707,95 +711,133 @@ func (r *MachineSyncReconciler) createOrUpdateCAPIInfraMachine(ctx context.Conte return ctrl.Result{}, syncronizationIsProgressing, nil } -// createOrUpdateCAPIMachine creates a CAPI machine from a MAPI one, or updates if it exists and it is out of date. -func (r *MachineSyncReconciler) createOrUpdateCAPIMachine(ctx context.Context, mapiMachine *mapiv1beta1.Machine, capiMachine *clusterv1.Machine, newCAPIMachine *clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam +// ensureCAPIMachine creates a new CAPI machine if one doesn't exist. +func (r *MachineSyncReconciler) ensureCAPIMachine(ctx context.Context, sourceMAPIMachine *mapiv1beta1.Machine, existingCAPIMachine, convertedCAPIMachine *clusterv1.Machine) (*clusterv1.Machine, error) { + // If there is an existing CAPI machine, no need to create one. 
+ if existingCAPIMachine != nil { + return existingCAPIMachine, nil + } + logger := logf.FromContext(ctx) - if capiMachine == nil { - if err := r.Create(ctx, newCAPIMachine); err != nil { - logger.Error(err, "Failed to create Cluster API machine") + // Set the existing CAPI machine to the converted CAPI machine. + existingCAPIMachine = convertedCAPIMachine.DeepCopy() - createErr := fmt.Errorf("failed to create Cluster API machine: %w", err) - if condErr := r.applySynchronizedConditionWithPatch( - ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToCreateCAPIMachine, createErr.Error(), nil); condErr != nil { - return ctrl.Result{}, utilerrors.NewAggregate([]error{createErr, condErr}) - } + if err := r.Create(ctx, existingCAPIMachine); err != nil { + logger.Error(err, "Failed to create Cluster API machine") - return ctrl.Result{}, createErr + createErr := fmt.Errorf("failed to create Cluster API machine: %w", err) + if condErr := r.applySynchronizedConditionWithPatch( + ctx, sourceMAPIMachine, corev1.ConditionFalse, reasonFailedToCreateCAPIMachine, createErr.Error(), nil); condErr != nil { + return existingCAPIMachine, utilerrors.NewAggregate([]error{createErr, condErr}) } - logger.Info("Successfully created Cluster API machine") - - return ctrl.Result{}, nil + return nil, createErr } - capiMachinesDiff := compareCAPIMachines(capiMachine, newCAPIMachine) + logger.Info("Successfully created Cluster API machine", "name", existingCAPIMachine.Name) - if len(capiMachinesDiff) == 0 { - logger.Info("No changes detected in Cluster API machine") - return ctrl.Result{}, nil + return existingCAPIMachine, nil +} + +// ensureCAPIMachineSpecUpdated updates the Cluster API machine if changes are detected to the spec, metadata or provider spec. 
+func (r *MachineSyncReconciler) ensureCAPIMachineSpecUpdated(ctx context.Context, mapiMachine *mapiv1beta1.Machine, capiMachinesDiff map[string]any, updatedCAPIMachine *clusterv1.Machine) (bool, error) { + logger := logf.FromContext(ctx) + + // If there are no spec changes, return early. + if !hasSpecOrMetadataOrProviderSpecChanges(capiMachinesDiff) { + return false, nil } - logger.Info("Changes detected, updating Cluster API machine", "diff", fmt.Sprintf("%+v", capiMachinesDiff)) + logger.Info("Changes detected for Cluster API machine. Updating it", "diff", fmt.Sprintf("%+v", capiMachinesDiff)) - if err := r.Update(ctx, newCAPIMachine); err != nil { + if err := r.Update(ctx, updatedCAPIMachine); err != nil { logger.Error(err, "Failed to update Cluster API machine") updateErr := fmt.Errorf("failed to update Cluster API machine: %w", err) if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToUpdateCAPIMachine, updateErr.Error(), nil); condErr != nil { - return ctrl.Result{}, utilerrors.NewAggregate([]error{updateErr, condErr}) + return false, utilerrors.NewAggregate([]error{updateErr, condErr}) } - return ctrl.Result{}, updateErr + return false, updateErr } - logger.Info("Successfully updated Cluster API machine") - - return ctrl.Result{}, nil + return true, nil } -// createOrUpdateMAPIMachine creates a MAPI machine from a CAPI one, or updates -// if it exists and it is out of date. -func (r *MachineSyncReconciler) createOrUpdateMAPIMachine(ctx context.Context, mapiMachine *mapiv1beta1.Machine, newMAPIMachine *mapiv1beta1.Machine) (ctrl.Result, error) { //nolint:unparam +// createOrUpdateCAPIMachine creates or updates (if existing but out of date) a CAPI machine from a convertedCAPIMachine (CAPI machine object converted from MAPI). +// It returns the CAPI machine, existing or newly created. 
+func (r *MachineSyncReconciler) createOrUpdateCAPIMachine(ctx context.Context, sourceMAPIMachine *mapiv1beta1.Machine, existingCAPIMachine, convertedCAPIMachine *clusterv1.Machine) (*clusterv1.Machine, error) { logger := logf.FromContext(ctx) - if mapiMachine == nil { - if err := r.Create(ctx, newMAPIMachine); err != nil { - logger.Error(err, "Failed to create Machine API machine") - return ctrl.Result{}, fmt.Errorf("failed to create Machine API machine: %w", err) - } + // If there is no existing CAPI machine, create a new one. + existingCAPIMachine, err := r.ensureCAPIMachine(ctx, sourceMAPIMachine, existingCAPIMachine, convertedCAPIMachine) + if err != nil { + return nil, fmt.Errorf("failed to ensure Cluster API machine: %w", err) + } + // There already is an existing CAPI machine, work out if it needs updating. - logger.Info("Successfully created Machine API machine") + // Compare the existing CAPI machine with the desired CAPI machine to check for changes. + capiMachinesDiff := compareCAPIMachines(existingCAPIMachine, convertedCAPIMachine) - return ctrl.Result{}, nil + // Update the CAPI machine spec/metadata/provider spec if needed. + specUpdated, err := r.ensureCAPIMachineSpecUpdated(ctx, sourceMAPIMachine, capiMachinesDiff, convertedCAPIMachine) + if err != nil { + return nil, fmt.Errorf("failed to ensure Cluster API machine spec updated: %w", err) } - mapiMachinesDiff, err := compareMAPIMachines(mapiMachine, newMAPIMachine) + // Update the CAPI machine status if needed. 
+ statusUpdated, err := r.ensureCAPIMachineStatusUpdated(ctx, sourceMAPIMachine, existingCAPIMachine, convertedCAPIMachine, convertedCAPIMachine, capiMachinesDiff, specUpdated) if err != nil { - return ctrl.Result{}, fmt.Errorf("unable to compare Machine API machines: %w", err) + return nil, fmt.Errorf("failed to ensure Cluster API machine status updated: %w", err) } - if len(mapiMachinesDiff) == 0 { - logger.Info("No changes detected in Machine API machine") - return ctrl.Result{}, nil + if specUpdated || statusUpdated { + logger.Info("Successfully updated Cluster API machine") + } else { + logger.Info("No changes detected for Cluster API machine") } - logger.Info("Changes detected, updating Machine API machine", "diff", mapiMachinesDiff) + return existingCAPIMachine, nil +} - if err := r.Update(ctx, newMAPIMachine); err != nil { - logger.Error(err, "Failed to update Machine API machine") +// createOrUpdateMAPIMachine creates or updates (if existing but out of date) a MAPI machine from a convertedMAPIMachine (MAPI machine object converted from CAPI). +// +//nolint:unparam +func (r *MachineSyncReconciler) createOrUpdateMAPIMachine(ctx context.Context, existingMAPIMachine *mapiv1beta1.Machine, convertedMAPIMachine *mapiv1beta1.Machine) (ctrl.Result, error) { + logger := logf.FromContext(ctx) - updateErr := fmt.Errorf("failed to update Machine API machine: %w", err) + // If there is no existing MAPI machine, create a new one. + existingMAPIMachine, err := r.ensureMAPIMachine(ctx, existingMAPIMachine, convertedMAPIMachine) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to ensure Machine API machine: %w", err) + } + // There already is an existing MAPI machine, work out if it needs updating. 
- if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToUpdateMAPIMachine, updateErr.Error(), nil); condErr != nil { - return ctrl.Result{}, utilerrors.NewAggregate([]error{updateErr, condErr}) - } + // Compare the existing MAPI machine with the converted MAPI machine to check for changes. + mapiMachinesDiff, err := compareMAPIMachines(existingMAPIMachine, convertedMAPIMachine) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to compare Machine API machines: %w", err) + } + + // Update the MAPI machine spec if needed. + specUpdated, err := r.ensureMAPIMachineSpecUpdated(ctx, existingMAPIMachine, mapiMachinesDiff, convertedMAPIMachine) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to ensure Machine API machine spec updated: %w", err) + } - return ctrl.Result{}, updateErr + // Update the MAPI machine status if needed. + statusUpdated, err := r.ensureMAPIMachineStatusUpdated(ctx, existingMAPIMachine, convertedMAPIMachine, mapiMachinesDiff, specUpdated) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to ensure Machine API machine status updated: %w", err) } - logger.Info("Successfully updated Machine API machine") + if specUpdated || statusUpdated { + logger.Info("Successfully updated Machine API machine") + } else { + logger.Info("No changes detected for Machine API machine") + } return ctrl.Result{}, nil } @@ -1288,6 +1330,10 @@ func compareCAPIMachines(capiMachine1, capiMachine2 *clusterv1.Machine) map[stri diff[".metadata"] = diffObjectMeta } + if diffStatus := util.CAPIMachineStatusEqual(capiMachine1.Status, capiMachine2.Status); len(diffStatus) > 0 { + diff[".status"] = diffStatus + } + return diff } @@ -1338,6 +1384,10 @@ func compareMAPIMachines(a, b *mapiv1beta1.Machine) (map[string]any, error) { diff[".metadata"] = diffObjectMeta } + if diffStatus := util.MAPIMachineStatusEqual(a.Status, b.Status); len(diffStatus) > 0 { + diff[".status"] = diffStatus + } + return 
diff, nil } @@ -1414,6 +1464,212 @@ func compareCAPIInfraMachines(platform configv1.PlatformType, infraMachine1, inf } } +// ensureCAPIMachineStatusUpdated updates the CAPI machine status if changes are detected and conditions are met. +func (r *MachineSyncReconciler) ensureCAPIMachineStatusUpdated(ctx context.Context, mapiMachine *mapiv1beta1.Machine, existingCAPIMachine, convertedCAPIMachine, updatedOrCreatedCAPIMachine *clusterv1.Machine, capiMachinesDiff map[string]any, specUpdated bool) (bool, error) { + logger := logf.FromContext(ctx) + + // If there are spec changes we always want to update the status to sync the spec generation. + // If there are status changes we always want to update the status. + // If both the above are false, we can skip updating the status and return early. + if !specUpdated && !hasStatusChanges(capiMachinesDiff) { + return false, nil + } + + // If the source API object (MAPI Machine) status.synchronizedGeneration does not match the objectmeta.generation + // it means the source API object status has not yet caught up with the desired spec, + // so we don't want to update the CAPI machine status until that has happened. + if mapiMachine.Status.SynchronizedGeneration != mapiMachine.ObjectMeta.Generation { + logger.Info("Changes detected for Cluster API machine status, but the MAPI machine spec has not been observed yet, skipping status update") + + return false, nil + } + + patchBase := client.MergeFrom(existingCAPIMachine.DeepCopy()) + setChangedCAPIMachineStatusFields(existingCAPIMachine, convertedCAPIMachine) + + // Update the observed generation to match the updated source API object generation. 
+ existingCAPIMachine.Status.ObservedGeneration = updatedOrCreatedCAPIMachine.ObjectMeta.Generation + + isPatchRequired, err := util.IsPatchRequired(existingCAPIMachine, patchBase) + if err != nil { + return false, fmt.Errorf("failed to check if patch is required: %w", err) + } + + if !isPatchRequired { + // If the patch is not required, return early. + return false, nil + } + + logger.Info("Changes detected for Cluster API machine status. Updating it") + + if err := r.Status().Patch(ctx, existingCAPIMachine, patchBase); err != nil { + logger.Error(err, "Failed to update Cluster API machine status") + updateErr := fmt.Errorf("failed to update status: %w", err) + + if condErr := r.applySynchronizedConditionWithPatch(ctx, mapiMachine, corev1.ConditionFalse, reasonFailedToUpdateCAPIMachine, updateErr.Error(), nil); condErr != nil { + return false, utilerrors.NewAggregate([]error{updateErr, condErr}) + } + + return false, updateErr + } + + return true, nil +} + +// ensureMAPIMachine creates a new MAPI machine if one doesn't exist. +func (r *MachineSyncReconciler) ensureMAPIMachine(ctx context.Context, existingMAPIMachine *mapiv1beta1.Machine, convertedMAPIMachine *mapiv1beta1.Machine) (*mapiv1beta1.Machine, error) { + // If there is an existing MAPI machine, no need to create one. + if existingMAPIMachine != nil { + return existingMAPIMachine, nil + } + + logger := logf.FromContext(ctx) + + // Set the existing MAPI machine to the converted MAPI machine. 
+ existingMAPIMachine = convertedMAPIMachine.DeepCopy() + + if err := r.Create(ctx, convertedMAPIMachine); err != nil { + logger.Error(err, "Failed to create Machine API machine") + + createErr := fmt.Errorf("failed to create Machine API machine: %w", err) + if condErr := r.applySynchronizedConditionWithPatch( + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToCreateMAPIMachine, createErr.Error(), nil); condErr != nil { + return nil, utilerrors.NewAggregate([]error{createErr, condErr}) + } + + return nil, createErr + } + + logger.Info("Successfully created Machine API machine", "name", existingMAPIMachine.Name) + + return existingMAPIMachine, nil +} + +// ensureMAPIMachineStatusUpdated updates the MAPI machine status if changes are detected. +func (r *MachineSyncReconciler) ensureMAPIMachineStatusUpdated(ctx context.Context, existingMAPIMachine *mapiv1beta1.Machine, convertedMAPIMachine *mapiv1beta1.Machine, mapiMachinesDiff map[string]any, specUpdated bool) (bool, error) { + logger := logf.FromContext(ctx) + + // If there are no status changes and the spec has not been updated, return early. + if !hasStatusChanges(mapiMachinesDiff) && !specUpdated { + return false, nil + } + + logger.Info("Changes detected for Machine API machine status. Updating it", "diff", fmt.Sprintf("%+v", mapiMachinesDiff)) + + patchBase := client.MergeFrom(existingMAPIMachine.DeepCopy()) + + // Set the changed MAPI machine status fields from the converted MAPI machine. + setChangedMAPIMachineStatusFields(existingMAPIMachine, convertedMAPIMachine) + + // Here we would've updated the observed generation to match the updated source API object generation. + // MAPI Machine does not have the observed generation field. + + isPatchRequired, err := util.IsPatchRequired(existingMAPIMachine, patchBase) + if err != nil { + return false, fmt.Errorf("failed to check if patch is required: %w", err) + } + + if !isPatchRequired { + // If the patch is not required, return early. 
+ return false, nil + } + + if err := r.Status().Patch(ctx, existingMAPIMachine, patchBase); err != nil { + logger.Error(err, "Failed to update Machine API machine status") + updateErr := fmt.Errorf("failed to update status: %w", err) + + if condErr := r.applySynchronizedConditionWithPatch( + ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToUpdateMAPIMachine, updateErr.Error(), nil); condErr != nil { + return false, utilerrors.NewAggregate([]error{updateErr, condErr}) + } + + return false, updateErr + } + + return true, nil +} + +// ensureMAPIMachineSpecUpdated updates the MAPI machine if changes are detected. +func (r *MachineSyncReconciler) ensureMAPIMachineSpecUpdated(ctx context.Context, existingMAPIMachine *mapiv1beta1.Machine, mapiMachinesDiff map[string]any, updatedMAPIMachine *mapiv1beta1.Machine) (bool, error) { + logger := logf.FromContext(ctx) + + // If there are no spec changes, return early. + if !hasSpecOrMetadataOrProviderSpecChanges(mapiMachinesDiff) { + return false, nil + } + + logger.Info("Changes detected for Machine API machine. Updating it", "diff", fmt.Sprintf("%+v", mapiMachinesDiff)) + + if err := r.Update(ctx, updatedMAPIMachine); err != nil { + logger.Error(err, "Failed to update Machine API machine") + + updateErr := fmt.Errorf("failed to update Machine API machine: %w", err) + + if condErr := r.applySynchronizedConditionWithPatch(ctx, existingMAPIMachine, corev1.ConditionFalse, reasonFailedToUpdateMAPIMachine, updateErr.Error(), nil); condErr != nil { + return false, utilerrors.NewAggregate([]error{updateErr, condErr}) + } + + return false, updateErr + } + + return true, nil +} + +// setChangedCAPIMachineStatusFields sets the updated fields in the CAPI machine status. 
+func setChangedCAPIMachineStatusFields(existingCAPIMachine, convertedCAPIMachine *clusterv1.Machine) {
+	// convertedCAPIMachine holds the computed and desired status changes converted from the source MAPI machine, so apply them to existingCAPIMachine.
+	// Merge the v1beta1 conditions.
+	for i := range convertedCAPIMachine.Status.Conditions {
+		conditions.Set(existingCAPIMachine, &convertedCAPIMachine.Status.Conditions[i])
+	}
+
+	// Copy them back to the convertedCAPIMachine.
+	convertedCAPIMachine.Status.Conditions = existingCAPIMachine.Status.Conditions
+
+	// Merge the v1beta2 conditions.
+	if convertedCAPIMachine.Status.V1Beta2 != nil {
+		if existingCAPIMachine.Status.V1Beta2 == nil {
+			existingCAPIMachine.Status.V1Beta2 = &clusterv1.MachineV1Beta2Status{}
+		}
+
+		for i := range convertedCAPIMachine.Status.V1Beta2.Conditions {
+			conditionsv1beta2.Set(existingCAPIMachine, convertedCAPIMachine.Status.V1Beta2.Conditions[i])
+		}
+
+		// Copy them back to the convertedCAPIMachine.
+		convertedCAPIMachine.Status.V1Beta2.Conditions = existingCAPIMachine.Status.V1Beta2.Conditions
+	}
+
+	// Finally overwrite the entire existingCAPIMachine status with the convertedCAPIMachine status.
+	existingCAPIMachine.Status = convertedCAPIMachine.Status
+}
+
+// setChangedMAPIMachineStatusFields sets the updated fields in the MAPI machine status.
+func setChangedMAPIMachineStatusFields(existingMAPIMachine, convertedMAPIMachine *mapiv1beta1.Machine) {
+	// convertedMAPIMachine holds the computed and desired status changes, so apply them to existingMAPIMachine.
+	for i := range convertedMAPIMachine.Status.Conditions {
+		existingMAPIMachine.Status.Conditions = util.SetMAPICondition(existingMAPIMachine.Status.Conditions, &convertedMAPIMachine.Status.Conditions[i])
+	}
+
+	// Copy them back to the convertedMAPIMachine.
+ convertedMAPIMachine.Status.Conditions = existingMAPIMachine.Status.Conditions + + // Copy the other fields that are not present in the convertedMAPIMachine from the existingMAPIMachine. + convertedMAPIMachine.Status.AuthoritativeAPI = existingMAPIMachine.Status.AuthoritativeAPI + convertedMAPIMachine.Status.SynchronizedGeneration = existingMAPIMachine.Status.SynchronizedGeneration + convertedMAPIMachine.Status.LastOperation = existingMAPIMachine.Status.LastOperation + convertedMAPIMachine.Status.ProviderStatus = existingMAPIMachine.Status.ProviderStatus + + // Finally overwrite the entire existingMAPIMachine status with the convertedMAPIMachine status. + existingMAPIMachine.Status = convertedMAPIMachine.Status +} + +// hasStatusChanges checks if there are status-related changes in the diff. +func hasStatusChanges(diff map[string]any) bool { + _, hasStatus := diff[".status"] + return hasStatus +} + // applySynchronizedConditionWithPatch updates the synchronized condition // using a server side apply patch. We do this to force ownership of the // 'Synchronized' condition and 'SynchronizedGeneration'. @@ -1423,3 +1679,12 @@ func (r *MachineSyncReconciler) applySynchronizedConditionWithPatch(ctx context. machinev1applyconfigs.Machine, mapiMachine, status, reason, message, generation) } + +// hasSpecOrMetadataOrProviderSpecChanges checks if there are spec, metadata, or provider spec changes in the diff. 
+func hasSpecOrMetadataOrProviderSpecChanges(diff map[string]any) bool { + _, hasSpec := diff[".spec"] + _, hasMetadata := diff[".metadata"] + _, hasProviderSpec := diff[".providerSpec"] + + return hasSpec || hasMetadata || hasProviderSpec +} diff --git a/pkg/controllers/machinesync/machine_sync_controller_test.go b/pkg/controllers/machinesync/machine_sync_controller_test.go index 3c5cf082a..939e73303 100644 --- a/pkg/controllers/machinesync/machine_sync_controller_test.go +++ b/pkg/controllers/machinesync/machine_sync_controller_test.go @@ -127,7 +127,7 @@ var _ = Describe("With a running MachineSync Reconciler", func() { // reference it on the CAPI Machine capaMachineBuilder = awsv1resourcebuilder.AWSMachine(). WithNamespace(capiNamespace.GetName()). - WithName("machine-template") + WithName("foo") mapiMachineSetBuilder = machinev1resourcebuilder.MachineSet(). WithNamespace(mapiNamespace.GetName()). @@ -138,7 +138,7 @@ var _ = Describe("With a running MachineSync Reconciler", func() { // reference it on the CAPI MachineSet capaMachineTemplateBuilder := awsv1resourcebuilder.AWSMachineTemplate(). WithNamespace(capiNamespace.GetName()). - WithName("machine-template") + WithName("foo") capaMachineTemplate := capaMachineTemplateBuilder.Build() @@ -168,7 +168,7 @@ var _ = Describe("With a running MachineSync Reconciler", func() { mapiMachineBuilder = machinev1resourcebuilder.Machine(). WithNamespace(mapiNamespace.GetName()). - WithGenerateName("foo"). + WithName("foo"). WithProviderSpecBuilder(machinev1resourcebuilder.AWSProviderSpec().WithLoadBalancers(nil)) capiMachineBuilder = clusterv1resourcebuilder.Machine(). 
@@ -333,11 +333,11 @@ var _ = Describe("With a running MachineSync Reconciler", func() { BeforeEach(func() { By("Creating the MAPI machine") - mapiMachine = mapiMachineBuilder.WithName("test-machine").Build() + mapiMachine = mapiMachineBuilder.Build() Eventually(k8sClient.Create(ctx, mapiMachine)).Should(Succeed(), "mapi machine should be able to be created") By("Creating the CAPI Machine") - capiMachine = capiMachineBuilder.WithName("test-machine").Build() + capiMachine = capiMachineBuilder.Build() Eventually(k8sClient.Create(ctx, capiMachine)).Should(Succeed(), "capi machine should be able to be created") By("Setting the MAPI machine AuthoritativeAPI to Cluster API") @@ -514,21 +514,21 @@ var _ = Describe("With a running MachineSync Reconciler", func() { Context("when the MAPI machine does not exist and the CAPI machine does", func() { Context("and there is no CAPI machineSet owning the machine", func() { BeforeEach(func() { - capiMachine = capiMachineBuilder.WithName("test-machine-no-machineset").Build() + By("Creating the CAPI machine") + capiMachine = capiMachineBuilder.Build() Eventually(k8sClient.Create(ctx, capiMachine)).Should(Succeed(), "capi machine should be able to be created") - capaMachine = capaMachineBuilder.WithName("test-machine-no-machineset").WithOwnerReferences([]metav1.OwnerReference{ - { + By("Updating the CAPA machine adding the CAPI machine as an owner") + Eventually(k.Update(capaMachine, func() { + capaMachine.OwnerReferences = append(capaMachine.OwnerReferences, metav1.OwnerReference{ Kind: machineKind, APIVersion: clusterv1.GroupVersion.String(), Name: capiMachine.Name, UID: capiMachine.UID, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(false), - }, - }).Build() - - Eventually(k8sClient.Create(ctx, capaMachine)).Should(Succeed(), "capa machine should be able to be created") + }) + })).Should(Succeed(), "capa machine should be able to be updated") }) It("should not make any changes to the CAPI machine", func() { diff --git 
a/pkg/conversion/capi2mapi/machine.go b/pkg/conversion/capi2mapi/machine.go index 284f51e05..4f8fe5197 100644 --- a/pkg/conversion/capi2mapi/machine.go +++ b/pkg/conversion/capi2mapi/machine.go @@ -21,9 +21,12 @@ import ( mapiv1beta1 "github.com/openshift/api/machine/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capierrors "sigs.k8s.io/cluster-api/errors" ) const ( @@ -36,6 +39,11 @@ func fromCAPIMachineToMAPIMachine(capiMachine *clusterv1.Machine) (*mapiv1beta1. lifecycleHooks, capiMachineNonHookAnnotations := convertCAPILifecycleHookAnnotationsToMAPILifecycleHooksAndAnnotations(capiMachine.Annotations) + mapiMachineStatus, machineStatusErrs := convertCAPIMachineStatusToMAPI(capiMachine.Status) + if len(machineStatusErrs) > 0 { + errs = append(errs, machineStatusErrs...) + } + mapiMachine := &mapiv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: capiMachine.Name, @@ -55,6 +63,7 @@ func fromCAPIMachineToMAPIMachine(capiMachine *clusterv1.Machine) (*mapiv1beta1. // ProviderSpec: // ProviderSpec MUST NOT be populated here. It is added later by higher level fuctions. // Taints: // TODO(OCPCLOUD-2861): Taint propagation from Machines to Nodes is not yet implemented in CAPI. }, + Status: mapiMachineStatus, } // Unused fields - Below this line are fields not used from the CAPI Machine. @@ -99,6 +108,114 @@ func fromCAPIMachineToMAPIMachine(capiMachine *clusterv1.Machine) (*mapiv1beta1. return mapiMachine, nil } +// convertCAPIMachineStatusToMAPI converts a CAPI MachineStatus to MAPI format. +func convertCAPIMachineStatusToMAPI(capiStatus clusterv1.MachineStatus) (mapiv1beta1.MachineStatus, field.ErrorList) { + errs := field.ErrorList{} + + addresses, addressesErr := convertCAPIMachineAddressesToMAPI(capiStatus.Addresses) + if addressesErr != nil { + errs = append(errs, addressesErr...) 
+ } + + mapiStatus := mapiv1beta1.MachineStatus{ + NodeRef: capiStatus.NodeRef, + LastUpdated: capiStatus.LastUpdated, + // Conditions: // TODO(OCPCLOUD-3193): Add MAPI conditions when they are implemented. + ErrorReason: convertCAPIMachineFailureReasonToMAPIErrorReason(capiStatus.FailureReason), + ErrorMessage: convertCAPIMachineFailureMessageToMAPIErrorMessage(capiStatus.FailureMessage), + Phase: convertCAPIMachinePhaseToMAPI(capiStatus.Phase), + Addresses: addresses, + + // LastOperation // this is MAPI-specific and not used in CAPI. + // ObservedGeneration: // We don't set the observed generation at this stage as it is handled by the machineSync controller. + // AuthoritativeAPI: // Ignore, this field as it is not present in CAPI. + // SynchronizedGeneration: // Ignore, this field as it is not present in CAPI. + } + + // unused fields from CAPI MachineStatus + // - NodeInfo: not present on the MAPI Machine status. + // - CertificatesExpiryDate: not present on the MAPI Machine status. + // - BootstrapReady: this is derived and not stored directly in MAPI. + // - InfrastructureReady: this is derived and not stored directly in MAPI. + // - Deletion: not present on the MAPI Machine status. + // - V1Beta2: for now we use the V1Beta1 status fields to obtain the status of the MAPI Machine. + + return mapiStatus, errs +} + +// convertCAPIMachineAddressesToMAPI converts CAPI machine addresses to MAPI format. +func convertCAPIMachineAddressesToMAPI(capiAddresses clusterv1.MachineAddresses) ([]corev1.NodeAddress, field.ErrorList) { + if capiAddresses == nil { + return nil, nil + } + + errs := field.ErrorList{} + mapiAddresses := make([]corev1.NodeAddress, 0, len(capiAddresses)) + + // Addresses are slightly different between MAPI/CAPI. 
+	for _, addr := range capiAddresses {
+		switch addr.Type {
+		case clusterv1.MachineHostName:
+			mapiAddresses = append(mapiAddresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: addr.Address})
+		case clusterv1.MachineExternalIP:
+			mapiAddresses = append(mapiAddresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: addr.Address})
+		case clusterv1.MachineInternalIP:
+			mapiAddresses = append(mapiAddresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: addr.Address})
+		case clusterv1.MachineExternalDNS:
+			mapiAddresses = append(mapiAddresses, corev1.NodeAddress{Type: corev1.NodeExternalDNS, Address: addr.Address})
+		case clusterv1.MachineInternalDNS:
+			mapiAddresses = append(mapiAddresses, corev1.NodeAddress{Type: corev1.NodeInternalDNS, Address: addr.Address})
+		default:
+			errs = append(errs, field.Invalid(field.NewPath("status", "addresses"), string(addr.Type), string(addr.Type)+" unrecognized address type"))
+		}
+	}
+
+	return mapiAddresses, errs
+}
+
+// convertCAPIMachinePhaseToMAPI converts CAPI machine phase to MAPI format.
+func convertCAPIMachinePhaseToMAPI(capiPhase string) *string {
+	// Phase is slightly different between MAPI/CAPI.
+	// In CAPI can be one of: Pending;Provisioning;Provisioned;Running;Deleting;Deleted;Failed;Unknown
+	// In MAPI can be one of: Provisioning;Provisioned;Running;Deleting;Failed (missing Pending,Deleted,Unknown)
+	switch capiPhase {
+	case "":
+		return nil // Empty is equivalent to nil in MAPI.
+	case "Pending":
+		return nil // Pending is not supported in MAPI but it is a very early state so we don't need to represent it.
+	case "Deleted":
+		return ptr.To("Deleting") // Deleted is not supported in MAPI but we can stay in Deleting until the machine is fully removed.
+	case "Unknown":
+		return nil // Unknown is not supported in MAPI but we can set it to nil until we know more.
+ case "Provisioning", "Provisioned", "Running", "Deleting", "Failed": + return &capiPhase // This is a supported phase so we can represent it in MAPI. + default: + return nil // This is an unknown phase so we can't represent it in MAPI. + } +} + +// convertCAPIMachineFailureReasonToMAPIErrorReason converts CAPI MachineStatusError to MAPI MachineStatusError. +func convertCAPIMachineFailureReasonToMAPIErrorReason(capiFailureReason *capierrors.MachineStatusError) *mapiv1beta1.MachineStatusError { + if capiFailureReason == nil { + return nil + } + + mapiErrorReason := mapiv1beta1.MachineStatusError(*capiFailureReason) + + return &mapiErrorReason +} + +// convertCAPIMachineFailureMessageToMAPIErrorMessage converts CAPI MachineStatusError to MAPI MachineStatusError. +func convertCAPIMachineFailureMessageToMAPIErrorMessage(capiFailureMessage *string) *string { + if capiFailureMessage == nil { + return nil + } + + mapiErrorMessage := *capiFailureMessage + + return &mapiErrorMessage +} + const ( // Note the trailing slash here is important when we are trimming the prefix. 
capiPreDrainAnnotationPrefix = clusterv1.PreDrainDeleteHookAnnotationPrefix + "/" diff --git a/pkg/conversion/capi2mapi/machine_test.go b/pkg/conversion/capi2mapi/machine_test.go index e5d114358..e46e212ce 100644 --- a/pkg/conversion/capi2mapi/machine_test.go +++ b/pkg/conversion/capi2mapi/machine_test.go @@ -25,10 +25,11 @@ import ( capabuilder "github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/cluster-api/infrastructure/v1beta2" "github.com/openshift/cluster-capi-operator/pkg/conversion/test/matchers" "github.com/openshift/cluster-capi-operator/pkg/util" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capierrors "sigs.k8s.io/cluster-api/errors" ) var _ = Describe("capi2mapi Machine conversion", func() { @@ -97,3 +98,72 @@ var _ = Describe("capi2mapi Machine conversion", func() { }), ) }) + +var _ = Describe("capi2mapi Machine Status Conversion", func() { + Context("when converting CAPI Machine status to MAPI", func() { + It("should set all MAPI Machine status fields and conditions to the expected values", func() { + // Set CAPI machine status fields + nodeRef := &corev1.ObjectReference{ + Kind: "Node", + Name: "test-node", + Namespace: "", + } + lastUpdated := &metav1.Time{Time: time.Now()} + condition := clusterv1.Condition{ + Type: "Available", Status: corev1.ConditionTrue, + Severity: clusterv1.ConditionSeverityNone, + Reason: "MachineAvailable", Message: "Machine is available", + } + + capiMachine := capibuilder.Machine(). + WithName("test-machine"). + WithNamespace("test-namespace"). + WithNodeRef(nodeRef). + WithLastUpdated(lastUpdated). + WithAddresses(clusterv1.MachineAddresses{ + {Type: clusterv1.MachineAddressType(corev1.NodeInternalIP), Address: "10.0.0.1"}, + {Type: clusterv1.MachineAddressType(corev1.NodeExternalIP), Address: "203.0.113.1"}, + }). + WithPhase("Running"). 
+ WithFailureReason(ptr.To(capierrors.MachineStatusError("InvalidConfiguration"))). + WithFailureMessage(ptr.To(string("Test failure message"))). + WithConditions([]clusterv1.Condition{condition}). + Build() + + mapiStatus, errs := convertCAPIMachineStatusToMAPI(capiMachine.Status) + Expect(errs).To(BeEmpty()) + + Expect(mapiStatus.NodeRef).To(Equal(nodeRef)) + Expect(mapiStatus.LastUpdated).To(Equal(lastUpdated)) + Expect(mapiStatus.Addresses).To(ConsistOf( + SatisfyAll(HaveField("Type", corev1.NodeInternalIP), HaveField("Address", "10.0.0.1")), + SatisfyAll(HaveField("Type", corev1.NodeExternalIP), HaveField("Address", "203.0.113.1")), + )) + + Expect(mapiStatus.Phase).To(HaveValue(BeEquivalentTo(mapiv1beta1.PhaseRunning))) + Expect(mapiStatus.ErrorReason).To(HaveValue(BeEquivalentTo(mapiv1beta1.MachineStatusError("InvalidConfiguration")))) + Expect(mapiStatus.ErrorMessage).To(HaveValue(BeEquivalentTo("Test failure message"))) + + // We do not convert these conditions to MAPI conditions as they are not a 1:1 mapping conversion between CAPI and MAPI. + Expect(mapiStatus.Conditions).To(BeNil()) + }) + + It("should set all MAPI Machine status fields to empty when CAPI MachineStatus is empty", func() { + capiMachine := capibuilder.Machine(). + WithName("test-machine"). + WithNamespace("test-namespace"). 
+ Build() + + mapiStatus, errs := convertCAPIMachineStatusToMAPI(capiMachine.Status) + Expect(errs).To(BeEmpty()) + + Expect(mapiStatus.NodeRef).To(BeNil()) + Expect(mapiStatus.LastUpdated).To(BeNil()) + Expect(mapiStatus.Addresses).To(BeEmpty()) + Expect(mapiStatus.Phase).To(BeNil()) + Expect(mapiStatus.ErrorReason).To(BeNil()) + Expect(mapiStatus.ErrorMessage).To(BeNil()) + Expect(mapiStatus.Conditions).To(BeEmpty()) + }) + }) +}) diff --git a/pkg/conversion/capi2mapi/machineset.go b/pkg/conversion/capi2mapi/machineset.go index c6ad71ae1..0222a71cf 100644 --- a/pkg/conversion/capi2mapi/machineset.go +++ b/pkg/conversion/capi2mapi/machineset.go @@ -76,7 +76,10 @@ func convertCAPIMachineSetStatusToMAPI(capiStatus clusterv1.MachineSetStatus) ma // ObservedGeneration: // We don't set the observed generation at this stage as it is handled by the machineSetSync controller. // AuthoritativeAPI: // Ignore, this field as it is not present in CAPI. // SynchronizedGeneration: // Ignore, this field as it is not present in CAPI. - Conditions: convertCAPIConditionsToMAPI(capiStatus.Conditions), + + // The only two conditions normally used for MAPI MachineSets are Paused and Synchronized. + // We do not convert these conditions to MAPI conditions as they are managed directly by the machineSet sync and migration controllers. + Conditions: nil, } // Convert FailureReason/FailureMessage to ErrorReason/ErrorMessage @@ -100,10 +103,3 @@ func convertCAPIFailureReasonToMAPIErrorReason(capiFailureReason capierrors.Mach mapiErrorReason := mapiv1beta1.MachineSetStatusError(capiFailureReason) return &mapiErrorReason } - -// convertCAPIConditionsToMAPI converts CAPI conditions to MAPI conditions. -func convertCAPIConditionsToMAPI(capiConditions clusterv1.Conditions) []mapiv1beta1.Condition { - // The only two conditions normally used for MAPI MachineSets are Paused and Synchronized. 
- // We do not convert these conditions to MAPI conditions as they are managed directly by the machineSet sync and migration controllers. - return nil -} diff --git a/pkg/conversion/mapi2capi/machine.go b/pkg/conversion/mapi2capi/machine.go index 5ce60ed96..3cb5d0c88 100644 --- a/pkg/conversion/mapi2capi/machine.go +++ b/pkg/conversion/mapi2capi/machine.go @@ -23,7 +23,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capierrors "sigs.k8s.io/cluster-api/errors" ) const ( @@ -34,6 +36,11 @@ const ( func fromMAPIMachineToCAPIMachine(mapiMachine *mapiv1beta1.Machine, apiVersion, kind string) (*clusterv1.Machine, field.ErrorList) { var errs field.ErrorList + capiMachineStatus, capiMachineStatusErrs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + if len(capiMachineStatusErrs) > 0 { + errs = append(errs, capiMachineStatusErrs...) + } + capiMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: mapiMachine.Name, @@ -61,6 +68,7 @@ func fromMAPIMachineToCAPIMachine(mapiMachine *mapiv1beta1.Machine, apiVersion, // NodeDeletionTimeout: // TODO(OCPCLOUD-2715): not present on the MAPI API, we should implement them for feature parity. NodeDeletionTimeout: &metav1.Duration{Duration: time.Second * 10}, // Hardcode it to the CAPI default value until this is implemented in MAPI. }, + Status: capiMachineStatus, } // Node labels in MAPI are stored under .spec.metadata.labels and then propagated down to the node, @@ -79,3 +87,363 @@ func fromMAPIMachineToCAPIMachine(mapiMachine *mapiv1beta1.Machine, apiVersion, return capiMachine, errs } + +// convertMAPIMachineToCAPIMachineStatus converts a MAPI Machine to CAPI MachineStatus. 
+func convertMAPIMachineToCAPIMachineStatus(mapiMachine *mapiv1beta1.Machine) (clusterv1.MachineStatus, field.ErrorList) { + var errs field.ErrorList + + addresses, addressesErr := convertMAPIMachineAddressesToCAPI(mapiMachine.Status.Addresses) + if len(addressesErr) > 0 { + errs = append(errs, addressesErr...) + } + + capiStatus := clusterv1.MachineStatus{ + NodeRef: mapiMachine.Status.NodeRef, + LastUpdated: mapiMachine.Status.LastUpdated, + Addresses: addresses, + Phase: convertMAPIMachinePhaseToCAPI(mapiMachine.Status.Phase), + Conditions: convertMAPIMachineConditionsToCAPIMachineConditions(mapiMachine), + V1Beta2: convertMAPIMachineStatusToCAPIMachineV1Beta2Status(mapiMachine), + FailureReason: convertMAPIMachineErrorReasonToCAPIFailureReason(mapiMachine.Status.ErrorReason), + FailureMessage: convertMAPIMachineErrorMessageToCAPIFailureMessage(mapiMachine.Status.ErrorMessage), + InfrastructureReady: deriveCAPIInfrastructureReadyFromMAPI(mapiMachine), + BootstrapReady: deriveCAPIBootstrapReadyFromMAPI(mapiMachine), + + // MAPI doesn't provide node system info, so we return nil + // This field is typically populated by the node controller in CAPI + NodeInfo: nil, + + // Deletion: not present on the MAPI Machine status. // TODO: this is tied to the node draining and volume detaching, implement once those features are implemented in MAPI. + + // DO NOT SET HERE: + // CertificatesExpiryDate: // not present on the MAPI Machine status. (This value is only set for control plane machines, not necessary for worker machines conversion) + // ObservedGeneration: // We don't set the observed generation at this stage as it is handled by the machineSync controller. + } + + // unused fields from MAPI MachineStatus + + // - ProviderStatus: this is provider-specific and handled by separate infrastructure resources in CAPI. // TODO: use this when we implement CAPI InfraMachine conversion. + // - LastOperation: this is MAPI-specific and not used in CAPI. 
+ // - AuthoritativeAPI: this is part of the conversion mechanism, it is not used in CAPI. + // - SynchronizedGeneration: this is part of the conversion mechanism, it is not used in CAPI. + + return capiStatus, errs +} + +// convertMAPIMachineStatusToCAPIMachineV1Beta2Status converts a MAPI Machine to CAPI MachineV1Beta2Status. +func convertMAPIMachineStatusToCAPIMachineV1Beta2Status(mapiMachine *mapiv1beta1.Machine) *clusterv1.MachineV1Beta2Status { + return &clusterv1.MachineV1Beta2Status{ + Conditions: convertMAPIMachineConditionsToCAPIMachineV1Beta2StatusConditions(mapiMachine), + } +} + +// convertMAPIMachineConditionsToCAPIMachineConditions converts MAPI conditions to CAPI v1beta1 conditions. +// +//nolint:funlen +func convertMAPIMachineConditionsToCAPIMachineConditions(mapiMachine *mapiv1beta1.Machine) clusterv1.Conditions { + capiConditions := []clusterv1.Condition{} + + // According to CAPI v1beta1 machine conditions, there are three main conditions: + // Ready, BootstrapReady, InfrastructureReady + + readyCondition := clusterv1.Condition{ + Type: clusterv1.ReadyCondition, + Status: func() corev1.ConditionStatus { + if mapiMachine.Status.Phase != nil && *mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning { + return corev1.ConditionTrue + } + + return corev1.ConditionFalse + }(), + Severity: func() clusterv1.ConditionSeverity { + if mapiMachine.Status.Phase != nil && *mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning { + return clusterv1.ConditionSeverityNone + } + + return clusterv1.ConditionSeverityError + }(), + // LastTransitionTime will be set by the condition utilities. 
+ } + + bootstrapReadyCondition := clusterv1.Condition{ + Type: clusterv1.BootstrapReadyCondition, + Status: func() corev1.ConditionStatus { + if deriveCAPIBootstrapReadyFromMAPI(mapiMachine) { + return corev1.ConditionTrue + } + + return corev1.ConditionFalse + }(), + Severity: func() clusterv1.ConditionSeverity { + if !deriveCAPIBootstrapReadyFromMAPI(mapiMachine) { + return clusterv1.ConditionSeverityInfo + } + + return clusterv1.ConditionSeverityNone + }(), + // LastTransitionTime will be set by the condition utilities. + } + + infrastructureReadyCondition := clusterv1.Condition{ + Type: clusterv1.InfrastructureReadyCondition, + Status: func() corev1.ConditionStatus { + if deriveCAPIInfrastructureReadyFromMAPI(mapiMachine) { + return corev1.ConditionTrue + } + + return corev1.ConditionFalse + }(), + Reason: func() string { + if !deriveCAPIInfrastructureReadyFromMAPI(mapiMachine) { + return clusterv1.WaitingForInfrastructureFallbackReason + } + + return "" + }(), + Severity: func() clusterv1.ConditionSeverity { + if !deriveCAPIInfrastructureReadyFromMAPI(mapiMachine) { + return clusterv1.ConditionSeverityInfo + } + + return clusterv1.ConditionSeverityNone + }(), + // LastTransitionTime will be set by the condition utilities. + } + + capiConditions = append(capiConditions, readyCondition, bootstrapReadyCondition, infrastructureReadyCondition) + + return capiConditions +} + +// convertMAPIMachineConditionsToCAPIMachineV1Beta2StatusConditions converts MAPI conditions to CAPI v1beta2 conditions. 
+// +//nolint:funlen +func convertMAPIMachineConditionsToCAPIMachineV1Beta2StatusConditions(mapiMachine *mapiv1beta1.Machine) []metav1.Condition { + capiConditions := []metav1.Condition{} + + // According to CAPI v1beta2 machine conditions, there are 9 main conditions: + // Available, Ready, UpToDate, BootstrapConfigReady, InfrastructureReady, NodeReady, NodeHealthy, Deleting, Paused + + // Available condition - indicates if the machine is available for use + availableCondition := metav1.Condition{ + Type: clusterv1.AvailableV1Beta2Condition, + Status: func() metav1.ConditionStatus { + if hasRunningPhase(mapiMachine) { + return metav1.ConditionTrue + } + + return metav1.ConditionFalse + }(), + Reason: func() string { + if hasRunningPhase(mapiMachine) { + return clusterv1.MachineAvailableV1Beta2Reason // This is "Available" + } + + return clusterv1.NotAvailableV1Beta2Reason // This is "NotAvailable" + }(), + // LastTransitionTime will be set by the condition utilities. + } + + // Ready condition + readyCondition := metav1.Condition{ + Type: clusterv1.ReadyV1Beta2Condition, + Status: func() metav1.ConditionStatus { + if mapiMachine.Status.Phase != nil && *mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning { + return metav1.ConditionTrue + } + + return metav1.ConditionFalse + }(), + Reason: func() string { + if mapiMachine.Status.Phase != nil && *mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning { + return clusterv1.MachineReadyV1Beta2Reason + } + + return clusterv1.MachineNotReadyV1Beta2Reason + }(), + // LastTransitionTime will be set by the condition utilities. 
+ } + + // BootstrapConfigReady condition + bootstrapConfigReadyCondition := metav1.Condition{ + Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, + Status: func() metav1.ConditionStatus { + if deriveCAPIBootstrapReadyFromMAPI(mapiMachine) { + return metav1.ConditionTrue + } + + return metav1.ConditionFalse + }(), + Reason: func() string { + if deriveCAPIBootstrapReadyFromMAPI(mapiMachine) { + return clusterv1.MachineBootstrapConfigReadyV1Beta2Reason + } + + return clusterv1.MachineBootstrapConfigNotReadyV1Beta2Reason + }(), + // LastTransitionTime will be set by the condition utilities. + } + + // InfrastructureReady condition + infrastructureReadyCondition := metav1.Condition{ + Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, + Status: func() metav1.ConditionStatus { + if deriveCAPIInfrastructureReadyFromMAPI(mapiMachine) { + return metav1.ConditionTrue + } + + return metav1.ConditionFalse + }(), + Reason: func() string { + if deriveCAPIInfrastructureReadyFromMAPI(mapiMachine) { + return clusterv1.MachineInfrastructureReadyV1Beta2Reason + } + + return clusterv1.MachineInfrastructureNotReadyV1Beta2Reason + }(), + // LastTransitionTime will be set by the condition utilities. + } + + // NodeReady condition + // For now use the machine phase to determine the status of the node ready condition. + // TODO: update this if we change our mind for the nodehealthy condition below. 
+	nodeReadyCondition := metav1.Condition{
+		Type: clusterv1.MachineNodeReadyV1Beta2Condition,
+		Status: func() metav1.ConditionStatus {
+			if mapiMachine.Status.Phase != nil && (*mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning || *mapiMachine.Status.Phase == mapiv1beta1.PhaseDeleting) && mapiMachine.Status.NodeRef != nil {
+				return metav1.ConditionTrue
+			}
+
+			return metav1.ConditionFalse
+		}(),
+		Reason: func() string {
+			if mapiMachine.Status.Phase != nil && (*mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning || *mapiMachine.Status.Phase == mapiv1beta1.PhaseDeleting) && mapiMachine.Status.NodeRef != nil {
+				return clusterv1.MachineNodeReadyV1Beta2Reason
+			}
+
+			return clusterv1.MachineNodeNotReadyV1Beta2Reason
+		}(),
+		// LastTransitionTime will be set by the condition utilities.
+	}
+
+	// NodeHealthy condition
+	// MachineNodeHealthyV1Beta2Condition is true if the Machine's Node is ready and it does not report MemoryPressure, DiskPressure and PIDPressure.
+	// We don't need this at the moment, and it requires significant hoops to get the Node object every time and pipe it down to here.
+	// Do not implement this for now, rationale:
+	// https://github.com/openshift/cluster-capi-operator/pull/365#discussion_r2378857251
+
+	// UpToDate condition
+	// We should never set this condition in CAPI because we don't use MachineDeployments on the MAPI side
+	// and/or don't support "matching" higher level abstractions for the conversion of a MachineSet from MAPI to CAPI
+
+	// Paused condition
+	// We ignore the paused condition at this level as it is handled by the machineSetMigration controller.
+ + // Deleting condition + isDeleting := mapiMachine.DeletionTimestamp != nil && !mapiMachine.DeletionTimestamp.IsZero() + deletingCondition := metav1.Condition{ + Type: clusterv1.MachineDeletingV1Beta2Condition, + Status: map[bool]metav1.ConditionStatus{true: metav1.ConditionTrue, false: metav1.ConditionFalse}[isDeleting], + Reason: map[bool]string{true: clusterv1.MachineDeletingV1Beta2Reason, false: clusterv1.MachineNotDeletingV1Beta2Reason}[isDeleting], + // LastTransitionTime will be set by the condition utilities. + } + + capiConditions = append(capiConditions, availableCondition, readyCondition, bootstrapConfigReadyCondition, infrastructureReadyCondition, deletingCondition, nodeReadyCondition) + + return capiConditions +} + +// convertMAPIMachineAddressesToCAPI converts MAPI machine addresses to CAPI format. +func convertMAPIMachineAddressesToCAPI(mapiAddresses []corev1.NodeAddress) (clusterv1.MachineAddresses, field.ErrorList) { + if mapiAddresses == nil { + return nil, nil + } + + errs := field.ErrorList{} + capiAddresses := make(clusterv1.MachineAddresses, 0, len(mapiAddresses)) + + // Addresses are slightly different between MAPI/CAPI. + // In CAPI the address type can be: Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS + // In MAPI the address type can be: Hostname, ExternalIP, InternalIP (missing ExternalDNS and InternalDNS) + // This is fine when going from MAPI to CAPI, but needs to be handled when going from CAPI to MAPI. 
+	for _, addr := range mapiAddresses {
+		var t clusterv1.MachineAddressType
+
+		switch addr.Type {
+		case corev1.NodeHostName:
+			t = clusterv1.MachineHostName
+		case corev1.NodeExternalIP:
+			t = clusterv1.MachineExternalIP
+		case corev1.NodeInternalIP:
+			t = clusterv1.MachineInternalIP
+		case corev1.NodeExternalDNS:
+			t = clusterv1.MachineExternalDNS
+		case corev1.NodeInternalDNS:
+			t = clusterv1.MachineInternalDNS
+		default:
+			errs = append(errs, field.Invalid(field.NewPath("status", "addresses"), string(addr.Type), string(addr.Type)+" unrecognized address type"))
+
+			// Ignore the address if the type is unrecognized.
+			continue
+		}
+
+		capiAddresses = append(capiAddresses, clusterv1.MachineAddress{
+			Type: t,
+			Address: addr.Address,
+		})
+	}
+
+	return capiAddresses, errs
+}
+
+// convertMAPIMachinePhaseToCAPI converts MAPI machine phase to CAPI format.
+func convertMAPIMachinePhaseToCAPI(mapiPhase *string) string {
+	// Phase is slightly different between MAPI/CAPI.
+	// In CAPI can be one of: Pending;Provisioning;Provisioned;Running;Deleting;Deleted;Failed;Unknown
+	// In MAPI can be one of: Provisioning;Provisioned;Running;Deleting;Failed (missing Pending,Unknown)
+	// This is fine when going from MAPI to CAPI, but needs to be handled when going from CAPI to MAPI.
+	// MAPI and CAPI phases are compatible, but we need to handle the pointer vs string difference.
+	return ptr.Deref(mapiPhase, "")
+}
+
+// convertMAPIMachineErrorReasonToCAPIFailureReason converts MAPI MachineStatusError to CAPI MachineStatusError.
+func convertMAPIMachineErrorReasonToCAPIFailureReason(mapiErrorReason *mapiv1beta1.MachineStatusError) *capierrors.MachineStatusError {
+	if mapiErrorReason == nil {
+		return nil
+	}
+
+	return ptr.To(capierrors.MachineStatusError(*mapiErrorReason))
+}
+
+// convertMAPIMachineErrorMessageToCAPIFailureMessage converts a MAPI machine error message to a CAPI failure message.
+func convertMAPIMachineErrorMessageToCAPIFailureMessage(mapiErrorMessage *string) *string { + return mapiErrorMessage +} + +// deriveCAPIBootstrapReadyFromMAPI derives the CAPI BootstrapReady field from MAPI machine state. +func deriveCAPIBootstrapReadyFromMAPI(mapiMachine *mapiv1beta1.Machine) bool { + // Bootstrap is considered ready if the machine is in Running, Deleting phases + if mapiMachine.Status.Phase != nil { + phase := *mapiMachine.Status.Phase + + return phase == mapiv1beta1.PhaseRunning || phase == mapiv1beta1.PhaseDeleting + } + + return false +} + +// deriveCAPIInfrastructureReadyFromMAPI derives the CAPI InfrastructureReady field from MAPI machine state. +func deriveCAPIInfrastructureReadyFromMAPI(mapiMachine *mapiv1beta1.Machine) bool { + // Infrastructure is considered ready if the machine is in Provisioned, Running, Deleting phases + if mapiMachine.Status.Phase != nil { + phase := *mapiMachine.Status.Phase + return phase == mapiv1beta1.PhaseProvisioned || phase == mapiv1beta1.PhaseRunning || phase == mapiv1beta1.PhaseDeleting + } + + return false +} + +// hasRunningPhase checks if the machine is in the Running phase. +func hasRunningPhase(mapiMachine *mapiv1beta1.Machine) bool { + return mapiMachine.Status.Phase != nil && *mapiMachine.Status.Phase == mapiv1beta1.PhaseRunning +} diff --git a/pkg/conversion/mapi2capi/machine_test.go b/pkg/conversion/mapi2capi/machine_test.go index d5bd7bdb0..6fe590fc3 100644 --- a/pkg/conversion/mapi2capi/machine_test.go +++ b/pkg/conversion/mapi2capi/machine_test.go @@ -17,15 +17,19 @@ limitations under the License. package mapi2capi import ( + "time" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" mapiv1beta1 "github.com/openshift/api/machine/v1beta1" + testutils "github.com/openshift/cluster-api-actuator-pkg/testutils" configbuilder "github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/config/v1" machinebuilder "github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/machine/v1beta1" "github.com/openshift/cluster-capi-operator/pkg/conversion/test/matchers" "github.com/openshift/cluster-capi-operator/pkg/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -139,3 +143,424 @@ var _ = Describe("mapi2capi Machine conversion", func() { }), ) }) + +var _ = Describe("mapi2capi Machine Status Conversion", func() { + Context("when converting MAPI Machine status to CAPI", func() { + It("should set all CAPI Machine status fields and conditions to the expected values", func() { + // Set MAPI machine status fields + nodeRef := corev1.ObjectReference{ + Kind: "Node", + Name: "test-node", + Namespace: "", + } + lastUpdated := metav1.Time{Time: time.Now()} + condition := mapiv1beta1.Condition{ + Type: "Available", + Status: corev1.ConditionTrue, + Severity: mapiv1beta1.ConditionSeverityNone, + Reason: "MachineAvailable", + Message: "Machine is available", + } + + // Build a MAPI Machine with status fields set. + mapiMachine := machinebuilder.Machine(). + WithName("test-machine"). + WithNamespace("test-namespace"). + WithNodeRef(nodeRef). + WithLastUpdated(lastUpdated). + WithPhase("Running"). + WithErrorReason(mapiv1beta1.MachineStatusError("InvalidConfiguration")). + WithErrorMessage("Test error message"). + WithAddresses([]corev1.NodeAddress{ + {Type: corev1.NodeInternalIP, Address: "10.0.0.1"}, + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + }). + WithConditions([]mapiv1beta1.Condition{condition}). 
+ Build() + + // Convert MAPI Machine to CAPI Machine status. + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + + // Check CAPI Machine status fields are matching the expected values. + Expect(capiStatus.NodeRef).To(HaveValue(Equal(nodeRef))) + Expect(capiStatus.LastUpdated).To(HaveValue(Equal(lastUpdated))) + Expect(capiStatus.Addresses).To(SatisfyAll( + HaveLen(2), + ContainElement(SatisfyAll(HaveField("Type", BeEquivalentTo(corev1.NodeInternalIP)), HaveField("Address", Equal("10.0.0.1")))), + ContainElement(SatisfyAll(HaveField("Type", BeEquivalentTo(corev1.NodeExternalIP)), HaveField("Address", Equal("203.0.113.1")))), + )) + Expect(capiStatus.Phase).To(BeEquivalentTo(clusterv1.MachinePhaseRunning)) + Expect(capiStatus.FailureReason).To(HaveValue(BeEquivalentTo(mapiv1beta1.MachineStatusError("InvalidConfiguration")))) + Expect(capiStatus.FailureMessage).To(HaveValue(BeEquivalentTo("Test error message"))) + Expect(capiStatus.Conditions).To(SatisfyAll( + ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + // The Ready condition is computed based on the phase. + // In this case they are equal, so the condition is true. + Type: clusterv1.ReadyCondition, + Status: corev1.ConditionTrue, + })), + ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + // The BootstrapReady condition is computed based on the phase. + // In this case they are equal, so the condition is true. + Type: clusterv1.BootstrapReadyCondition, + Status: corev1.ConditionTrue, + })), + ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + // The InfrastructureReady condition is computed based on the phase. + // In this case they are equal, so the condition is true. + Type: clusterv1.InfrastructureReadyCondition, + Status: corev1.ConditionTrue, + })), + Not(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + // The Available condition is not copied from MAPI. 
+ Type: "Available", + Status: corev1.ConditionTrue, + }))), + )) + Expect(capiStatus.V1Beta2.Conditions).To(SatisfyAll( + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The Available condition is not copied from MAPI. + Type: clusterv1.AvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineAvailableV1Beta2Reason, + })), + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The Ready condition is computed based on the phase. + Type: clusterv1.ReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineReadyV1Beta2Reason, + })), + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The BootstrapConfigReady condition is computed based on the phase. + Type: clusterv1.BootstrapConfigReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineBootstrapConfigReadyV1Beta2Reason, + })), + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The InfrastructureReady condition is computed based on the phase. + Type: clusterv1.InfrastructureReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineInfrastructureReadyV1Beta2Reason, + })), + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The NodeReady condition is computed based on the phase. + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, + })), + ContainElement(testutils.MatchCondition(metav1.Condition{ + // The Deleting condition is computed based on the phase. + Type: clusterv1.MachineDeletingV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, + })), + )) + }) + + // v1beta1 conditions + + It("should set CAPI Machine v1beta1 Ready condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). 
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.ReadyCondition, + Status: corev1.ConditionTrue, + }))) + }) + + It("should set CAPI Machine v1beta1 Ready condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.ReadyCondition, + Status: corev1.ConditionFalse, + }))) + }) + + It("should set CAPI Machine v1beta1 BootstrapReady condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.BootstrapReadyCondition, + Status: corev1.ConditionTrue, + }))) + }) + + It("should set CAPI Machine v1beta1 BootstrapReady condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.BootstrapReadyCondition, + Status: corev1.ConditionFalse, + }))) + }) + + It("should set CAPI Machine v1beta1 InfrastructureReady condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). 
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.InfrastructureReadyCondition, + Status: corev1.ConditionTrue, + }))) + }) + + It("should set CAPI Machine v1beta1 InfrastructureReady condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.Conditions).To(ContainElement(matchers.MatchCAPICondition(clusterv1.Condition{ + Type: clusterv1.InfrastructureReadyCondition, + Reason: clusterv1.WaitingForInfrastructureFallbackReason, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityInfo, + }))) + }) + + // v1beta2 conditions + + It("should set CAPI Machine v1beta2 Available condition to false when MAPI Machine phase is not Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.AvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.NotAvailableV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 Available condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). 
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.AvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineAvailableV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 Ready condition to false when MAPI Machine phase is not Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.ReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineNotReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 Ready condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.ReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 BootstrapConfigReady condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). 
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.BootstrapConfigReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineBootstrapConfigReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 BootstrapConfigReady condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.BootstrapConfigReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineBootstrapConfigNotReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 InfrastructureReady condition to true when MAPI Machine phase is Running", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.InfrastructureReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineInfrastructureReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 InfrastructureReady condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). 
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.InfrastructureReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineInfrastructureNotReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 NodeReady condition to true when MAPI Machine phase is Running", func() { + nodeRef := corev1.ObjectReference{ + Kind: "Node", + Name: "test-node", + Namespace: "", + } + + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). + WithNodeRef(nodeRef). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 NodeReady condition to false when MAPI Machine phase is Running but NodeRef is not set", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Running"). + Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + Expect(errs).To(BeEmpty()) + Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{ + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineNodeNotReadyV1Beta2Reason, + }))) + }) + + It("should set CAPI Machine v1beta2 NodeReady condition to false when MAPI Machine phase is Provisioning", func() { + mapiMachine := machinebuilder.Machine(). + WithPhase("Provisioning"). 
+				Build()
+
+			capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine)
+			Expect(errs).To(BeEmpty())
+			Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{
+				Type: clusterv1.MachineNodeReadyV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.MachineNodeNotReadyV1Beta2Reason,
+			})))
+		})
+
+		It("should set CAPI Machine v1beta2 Deleting condition to true when MAPI Machine phase is Deleting", func() {
+			mapiMachine := machinebuilder.Machine().
+				WithPhase("Deleting").
+				WithDeletionTimestamp(ptr.To(metav1.Now())).
+				Build()
+
+			capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine)
+			Expect(errs).To(BeEmpty())
+			Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{
+				Type: clusterv1.MachineDeletingV1Beta2Condition,
+				Status: metav1.ConditionTrue,
+				Reason: clusterv1.MachineDeletingV1Beta2Reason,
+			})))
+		})
+
+		It("should set CAPI Machine v1beta2 Deleting condition to false when MAPI Machine phase is not Deleting", func() {
+			mapiMachine := machinebuilder.Machine().
+				WithPhase("Provisioning").
+				Build()
+
+			capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine)
+			Expect(errs).To(BeEmpty())
+			Expect(capiStatus.V1Beta2.Conditions).To(ContainElement(testutils.MatchCondition(metav1.Condition{
+				Type: clusterv1.MachineDeletingV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.MachineNotDeletingV1Beta2Reason,
+			})))
+		})
+
+		It("should return error and ignore unrecognized address types when addresses contain an invalid type", func() {
+			// Create a machine with both valid and invalid address types
+			mapiMachine := machinebuilder.Machine().
+				WithAddresses([]corev1.NodeAddress{
+					{Type: corev1.NodeInternalIP, Address: "10.0.0.1"},
+					{Type: corev1.NodeAddressType("UnrecognizedType"), Address: "192.168.1.1"},
+					{Type: corev1.NodeExternalIP, Address: "203.0.113.1"},
+				}).
+ Build() + + capiStatus, errs := convertMAPIMachineToCAPIMachineStatus(mapiMachine) + + // Should have only one error, and it should be for the unrecognized address type. + Expect(errs).To(ConsistOf(field.ErrorList{ + &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "status.addresses", + BadValue: "UnrecognizedType", + Detail: "UnrecognizedType unrecognized address type", + }, + })) + + // Should only contain the valid addresses (ignoring the unrecognized one) + Expect(capiStatus.Addresses).To(SatisfyAll( + HaveLen(2), + ContainElement(SatisfyAll(HaveField("Type", BeEquivalentTo(clusterv1.MachineInternalIP)), HaveField("Address", Equal("10.0.0.1")))), + ContainElement(SatisfyAll(HaveField("Type", BeEquivalentTo(clusterv1.MachineExternalIP)), HaveField("Address", Equal("203.0.113.1")))), + Not(ContainElement(HaveField("Address", Equal("192.168.1.1")))), + )) + }) + }) +}) diff --git a/pkg/conversion/test/fuzz/fuzz.go b/pkg/conversion/test/fuzz/fuzz.go index f091495b4..a9cd4058d 100644 --- a/pkg/conversion/test/fuzz/fuzz.go +++ b/pkg/conversion/test/fuzz/fuzz.go @@ -124,8 +124,17 @@ func CAPI2MAPIMachineRoundTripFuzzTest(scheme *runtime.Scheme, infra *configv1.I // Break down the comparison to make it easier to debug sections that are failing conversion. + // Status comparison + capiMachine.Status.Conditions = nil // This is not a 1:1 mapping conversion between CAPI and MAPI. + capiMachine.Status.V1Beta2.Conditions = nil // This is not a 1:1 mapping conversion between CAPI and MAPI. + + capiMachine.Status.CertificatesExpiryDate = nil // This is not present on the MAPI Machine status. + capiMachine.Status.Deletion = nil // This is not present on the MAPI Machine status. + capiMachine.Status.NodeInfo = nil // This is not present on the MAPI Machine status. + + Expect(capiMachine.Status).To(Equal(in.machine.Status)) + // Status comparison for infrastructure machines is not implemented yet. 
- // Expect(capiMachine.Status).To(Equal(in.machine.Status)) // Expect(infraMachine.Status).To(Equal(in.infraMachine.Status)) capiMachine.Finalizers = nil @@ -286,8 +295,12 @@ func MAPI2CAPIMachineRoundTripFuzzTest(scheme *runtime.Scheme, infra *configv1.I // Break down the comparison to make it easier to debug sections that are failing conversion. - // Status comparison for machines is not implemented yet. - // Expect(mapiMachine.Status).To(Equal(in.machine.Status)) + mapiMachine.Status.ProviderStatus = nil // TODO: This should be removed once the CAPI InfraMachine to MAPI Machine conversion is implemented. + mapiMachine.Status.LastOperation = nil // Ignore, this field as it is not present in CAPI. + // The conditions are not a 1:1 mapping conversion between CAPI and MAPI. + // So null them out to match the original nil fuzzing. + mapiMachine.Status.Conditions = nil + Expect(mapiMachine.Status).To(Equal(in.machine.Status)) mapiMachine.Finalizers = nil Expect(mapiMachine.TypeMeta).To(Equal(in.machine.TypeMeta), "converted MAPI machine should have matching .typeMeta") @@ -343,9 +356,6 @@ func MAPI2CAPIMachineSetRoundTripFuzzTest(scheme *runtime.Scheme, infra *configv Expect(warnings).To(BeEmpty()) // Break down the comparison to make it easier to debug sections that are failing conversion. 
- - // Status comparison temporarily disabled due to field differences between MAPI and CAPI - mapiMachineSet.Finalizers = nil Expect(mapiMachineSet.TypeMeta).To(Equal(in.machineSet.TypeMeta), "converted MAPI machine set should have matching .typeMeta") Expect(mapiMachineSet.ObjectMeta).To(Equal(in.machineSet.ObjectMeta), "converted MAPI machine set should have matching .metadata") @@ -484,6 +494,25 @@ func CAPIMachineFuzzerFuncs(providerIDFuzz StringFuzzer, infraKind, infraAPIVers Namespace: m.Namespace, } }, + func(m *clusterv1.MachineStatus, c randfill.Continue) { + c.FillNoCustom(m) + + fuzzCAPIMachineStatusAddresses(&m.Addresses, c) + fuzzCAPIMachineStatusPhase(&m.Phase, c) + + m.ObservedGeneration = 0 // Ignore, this field as it shouldn't match between CAPI and MAPI. + m.Conditions = nil // Ignore, this field as it is not a 1:1 mapping between CAPI and MAPI but rather a recomputation of the conditions based on other fields. + m.CertificatesExpiryDate = nil // Ignore, this field as it is not present in MAPI. + m.Deletion = nil // Ignore, this field as it is not present in MAPI. + }, + func(m *clusterv1.MachineStatus, c randfill.Continue) { + // Deal with the V1Beta2 status field. + if m.V1Beta2 == nil { + m.V1Beta2 = &clusterv1.MachineV1Beta2Status{} + } + + m.V1Beta2.Conditions = nil + }, } } } @@ -635,6 +664,18 @@ func MAPIMachineFuzzerFuncs(providerSpec runtime.Object, providerIDFuzz StringFu hooks.PreDrain = nil } }, + func(m *mapiv1beta1.MachineStatus, c randfill.Continue) { + c.FillNoCustom(m) + + fuzzMAPIMachineStatusAddresses(&m.Addresses, c) + fuzzMAPIMachineStatusPhase(m.Phase, c) + + m.LastOperation = nil // Ignore, this field as it is not present in CAPI. + m.ProviderStatus = nil // Ignore, this field as the conversion logic is not yet implemented. // TODO: remove this once the InfraMachine conversion is implemented. + m.AuthoritativeAPI = "" // Ignore, this field as it is not present in CAPI. 
+ m.SynchronizedGeneration = 0 // Ignore, this field as it is not present in CAPI. + m.Conditions = nil // Ignore, this field as it is not a 1:1 mapping between CAPI and MAPI but rather a recomputation of the conditions based on other fields. + }, } } } @@ -685,6 +726,7 @@ func MAPIMachineSetFuzzerFuncs() fuzzer.FuzzerFuncs { } } +// fuzzMAPIMachineSetSpecDeletePolicy fuzzes a single MAPI MachineSetDeletePolicy with valid values. func fuzzMAPIMachineSetSpecDeletePolicy(deletePolicy *string, c randfill.Continue) { switch c.Int31n(3) { case 0: @@ -702,6 +744,7 @@ func fuzzMAPIMachineSetSpecDeletePolicy(deletePolicy *string, c randfill.Continu } //nolint:wsl } +// fuzzCAPIMachineSetSpecDeletePolicy fuzzes a single CAPI MachineSetDeletePolicy with valid values. func fuzzCAPIMachineSetSpecDeletePolicy(deletePolicy *string, c randfill.Continue) { switch c.Int31n(3) { case 0: @@ -719,3 +762,167 @@ func fuzzCAPIMachineSetSpecDeletePolicy(deletePolicy *string, c randfill.Continu // This is not an issue in real conditions as the defaults are the same for CAPI and MAPI (Random). } //nolint:wsl } + +// fuzzMAPIMachineStatusAddress fuzzes a single MAPI machine status address with valid address types and randomized IP addresses. 
+// +//nolint:dupl +func fuzzMAPIMachineStatusAddress(address *corev1.NodeAddress, c randfill.Continue) { + // Fuzz the address type to one of the valid types for MAPI machines + // Based on the conversion code, MAPI supports: Hostname, ExternalIP, InternalIP + // (ExternalDNS and InternalDNS are not supported in MAPI conversion) + switch c.Int31n(5) { + case 0: + address.Type = corev1.NodeHostName + // Generate a random hostname + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + case 1: + address.Type = corev1.NodeExternalIP + // Generate a random external IP address (public IP range) + address.Address = fmt.Sprintf("%d.%d.%d.%d", + c.Int31n(223)+1, // 1-223 (avoid 0.x.x.x and 224+ multicast) + c.Int31n(256), // 0-255 + c.Int31n(256), // 0-255 + c.Int31n(254)+1) // 1-254 (avoid .0 and .255) + case 2: + address.Type = corev1.NodeInternalIP + // Generate a random internal IP address (private IP ranges) + switch c.Int31n(3) { + case 0: + // 10.0.0.0/8 + address.Address = fmt.Sprintf("10.%d.%d.%d", + c.Int31n(256), c.Int31n(256), c.Int31n(254)+1) + case 1: + // 172.16.0.0/12 + address.Address = fmt.Sprintf("172.%d.%d.%d", + c.Int31n(16)+16, c.Int31n(256), c.Int31n(254)+1) + case 2: + // 192.168.0.0/16 + address.Address = fmt.Sprintf("192.168.%d.%d", + c.Int31n(256), c.Int31n(254)+1) + } + case 3: + address.Type = corev1.NodeExternalDNS + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + case 4: + address.Type = corev1.NodeInternalDNS + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + } +} + +// fuzzMAPIMachineStatusAddresses fuzzes a slice of MAPI machine status addresses with randomized count and content. 
+func fuzzMAPIMachineStatusAddresses(addresses *[]corev1.NodeAddress, c randfill.Continue) { + // Randomize the number of addresses (0-3 addresses) + count := c.Int31n(4) + *addresses = make([]corev1.NodeAddress, count) + + // Fuzz each address + for i := range *addresses { + fuzzMAPIMachineStatusAddress(&(*addresses)[i], c) + } +} + +// fuzzMAPIMachineStatusPhase fuzzes a single MAPI machine status phase with valid phases. +func fuzzMAPIMachineStatusPhase(phase *string, c randfill.Continue) { + if phase == nil { + phase = ptr.To("") + } + + switch c.Int31n(5) { + case 0: + *phase = "Running" + case 1: + *phase = "Provisioning" + case 2: + *phase = "Provisioned" + case 3: + *phase = "Deleting" + case 4: + *phase = "Failed" + } +} + +// fuzzCAPIMachineStatusAddresses fuzzes a slice of CAPI machine status addresses with randomized count and content. +func fuzzCAPIMachineStatusAddresses(addresses *clusterv1.MachineAddresses, c randfill.Continue) { + // Randomize the number of addresses (0-3 addresses) + count := c.Int31n(4) + *addresses = make(clusterv1.MachineAddresses, count) + + // Fuzz each address + for i := range *addresses { + fuzzCAPIMachineStatusAddress(&(*addresses)[i], c) + } +} + +// fuzzCAPIMachineStatusPhase fuzzes a single CAPI machine status phase with valid phases. 
+func fuzzCAPIMachineStatusPhase(phase *string, c randfill.Continue) { + if phase == nil { + phase = ptr.To("") + } + + switch c.Int31n(8) { + case 0: + *phase = string(clusterv1.MachinePhasePending) + case 1: + *phase = string(clusterv1.MachinePhaseRunning) + case 2: + *phase = string(clusterv1.MachinePhaseProvisioning) + case 3: + *phase = string(clusterv1.MachinePhaseProvisioned) + case 4: + *phase = string(clusterv1.MachinePhaseDeleting) + case 5: + *phase = string(clusterv1.MachinePhaseFailed) + case 6: + *phase = string(clusterv1.MachinePhaseDeleted) + case 7: + *phase = string(clusterv1.MachinePhaseUnknown) + } +} + +// fuzzCAPIMachineStatusAddress fuzzes a single CAPI machine status address with valid address types and randomized IP addresses. +// +//nolint:dupl +func fuzzCAPIMachineStatusAddress(address *clusterv1.MachineAddress, c randfill.Continue) { + // Fuzz the address type to one of the valid types for CAPI machines + // Based on the conversion code, CAPI supports: Hostname, ExternalIP, InternalIP + // (ExternalDNS and InternalDNS are not supported in CAPI conversion) + switch c.Int31n(5) { + case 0: + address.Type = clusterv1.MachineHostName + // Generate a random hostname + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + case 1: + address.Type = clusterv1.MachineExternalIP + // Generate a random external IP address (public IP range) + address.Address = fmt.Sprintf("%d.%d.%d.%d", + c.Int31n(223)+1, // 1-223 (avoid 0.x.x.x and 224+ multicast) + c.Int31n(256), // 0-255 + c.Int31n(256), // 0-255 + c.Int31n(254)+1) // 1-254 (avoid .0 and .255) + case 2: + address.Type = clusterv1.MachineInternalIP + // Generate a random internal IP address (private IP ranges) + switch c.Int31n(3) { + case 0: + // 10.0.0.0/8 + address.Address = fmt.Sprintf("10.%d.%d.%d", + c.Int31n(256), c.Int31n(256), c.Int31n(254)+1) + case 1: + // 172.16.0.0/12 + address.Address = fmt.Sprintf("172.%d.%d.%d", + c.Int31n(16)+16, c.Int31n(256), c.Int31n(254)+1) + 
case 2: + // 192.168.0.0/16 + address.Address = fmt.Sprintf("192.168.%d.%d", + c.Int31n(256), c.Int31n(254)+1) + } + case 3: + address.Type = clusterv1.MachineExternalDNS + // Generate a random external DNS address + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + case 4: + address.Type = clusterv1.MachineInternalDNS + // Generate a random internal DNS address + address.Address = fmt.Sprintf("node-%d.example.com", c.Int31n(1000)) + } +} diff --git a/pkg/util/conditions.go b/pkg/util/conditions.go index 260483d6d..c4f22b8e7 100644 --- a/pkg/util/conditions.go +++ b/pkg/util/conditions.go @@ -113,8 +113,8 @@ func getTime(data map[string]interface{}, key string) metav1.Time { return metav1.Time{} } -// GetMAPIMachineSetCondition retrieves a specific condition from a list of MAPI MachineSet conditions. -func GetMAPIMachineSetCondition(conditions []mapiv1beta1.Condition, conditionType string) *mapiv1beta1.Condition { +// GetMAPICondition retrieves a specific condition from a list of MAPI conditions. +func GetMAPICondition(conditions []mapiv1beta1.Condition, conditionType string) *mapiv1beta1.Condition { for i := range conditions { if string(conditions[i].Type) == conditionType { return &conditions[i] @@ -124,14 +124,14 @@ func GetMAPIMachineSetCondition(conditions []mapiv1beta1.Condition, conditionTyp return nil } -// SetMAPIMachineSetCondition sets a condition in a list of MAPI MachineSet conditions. +// SetMAPICondition sets a condition in a list of MAPI conditions. // If the condition already exists and state (Status, Reason, Message) has changed: // - if the lasttransitiontime is not set, it sets it to the current time // - if the lasttransitiontime is set, it updates it with the one of the newly provided condition lasttransitiontime. // If the condition state has not changed, it preserves the existing LastTransitionTime. // If the condition does not exist, it adds it. // This function behaves similarly to conditions.Set() for CAPI conditions. 
-func SetMAPIMachineSetCondition(conditions []mapiv1beta1.Condition, condition *mapiv1beta1.Condition) []mapiv1beta1.Condition { +func SetMAPICondition(conditions []mapiv1beta1.Condition, condition *mapiv1beta1.Condition) []mapiv1beta1.Condition { for i, currCondition := range conditions { if string(currCondition.Type) == string(condition.Type) { // Check if the condition state has changed (Status, Reason, Message) @@ -155,6 +155,11 @@ func SetMAPIMachineSetCondition(conditions []mapiv1beta1.Condition, condition *m } } + // Ensure LastTransitionTime is set also for new conditions. + if condition.LastTransitionTime.IsZero() { + condition.LastTransitionTime = metav1.NewTime(time.Now().UTC().Truncate(time.Second)) + } + // Condition doesn't exist, add it return append(conditions, *condition) } diff --git a/pkg/util/sync.go b/pkg/util/sync.go index 89e21110e..05ec4a508 100644 --- a/pkg/util/sync.go +++ b/pkg/util/sync.go @@ -81,13 +81,9 @@ func ObjectMetaEqual(a, b metav1.ObjectMeta) map[string]any { // CAPIMachineSetStatusEqual compares variables a and b, // and returns a list of differences, or nil if there are none, // for the fields we care about when synchronising MAPI and CAPI Machines. 
-func CAPIMachineSetStatusEqual(a, b clusterv1.MachineSetStatus) map[string]any { //nolint:dupl +func CAPIMachineSetStatusEqual(a, b clusterv1.MachineSetStatus) map[string]any { diff := map[string]any{} - if diffConditions := compareCAPIMachineSetConditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { - diff[".conditions"] = diffConditions - } - if diffReadyReplicas := deep.Equal(a.ReadyReplicas, b.ReadyReplicas); len(diffReadyReplicas) > 0 { diff[".readyReplicas"] = diffReadyReplicas } @@ -108,10 +104,137 @@ func CAPIMachineSetStatusEqual(a, b clusterv1.MachineSetStatus) map[string]any { diff[".failureMessage"] = diffFailureMessage } + if diffConditions := compareCAPIV1Beta1Conditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { + diff[".conditions"] = diffConditions + } + + // Compare the v1beta2 fields. + if a.V1Beta2 == nil { + a.V1Beta2 = &clusterv1.MachineSetV1Beta2Status{} + } + + if b.V1Beta2 == nil { + b.V1Beta2 = &clusterv1.MachineSetV1Beta2Status{} + } + + if diffUpToDateReplicas := deep.Equal(a.V1Beta2.UpToDateReplicas, b.V1Beta2.UpToDateReplicas); len(diffUpToDateReplicas) > 0 { + diff[".v1beta2.upToDateReplicas"] = diffUpToDateReplicas + } + + if diffAvailableReplicas := deep.Equal(a.V1Beta2.AvailableReplicas, b.V1Beta2.AvailableReplicas); len(diffAvailableReplicas) > 0 { + diff[".v1beta2.availableReplicas"] = diffAvailableReplicas + } + + if diffReadyReplicas := deep.Equal(a.V1Beta2.ReadyReplicas, b.V1Beta2.ReadyReplicas); len(diffReadyReplicas) > 0 { + diff[".v1beta2.readyReplicas"] = diffReadyReplicas + } + + if diffConditions := compareCAPIV1Beta2Conditions(a.V1Beta2.Conditions, b.V1Beta2.Conditions); len(diffConditions) > 0 { + diff[".v1beta2.conditions"] = diffConditions + } + + return diff +} + +// CAPIMachineStatusEqual compares variables a and b, +// and returns a list of differences, or nil if there are none, +// for the fields we care about when synchronising CAPI and MAPI Machines. 
+func CAPIMachineStatusEqual(a, b clusterv1.MachineStatus) map[string]any { + diff := map[string]any{} + + if diffFailureReason := deep.Equal(a.FailureReason, b.FailureReason); len(diffFailureReason) > 0 { + diff[".failureReason"] = diffFailureReason + } + + if diffFailureMessage := deep.Equal(a.FailureMessage, b.FailureMessage); len(diffFailureMessage) > 0 { + diff[".failureMessage"] = diffFailureMessage + } + + if diffLastUpdated := deep.Equal(a.LastUpdated, b.LastUpdated); len(diffLastUpdated) > 0 { + diff[".lastUpdated"] = diffLastUpdated + } + + if diffPhase := deep.Equal(a.Phase, b.Phase); len(diffPhase) > 0 { + diff[".phase"] = diffPhase + } + + if diffAddresses := deep.Equal(a.Addresses, b.Addresses); len(diffAddresses) > 0 { + diff[".addresses"] = diffAddresses + } + + if diffBootstrapReady := deep.Equal(a.BootstrapReady, b.BootstrapReady); len(diffBootstrapReady) > 0 { + diff[".bootstrapReady"] = diffBootstrapReady + } + + if diffInfrastructureReady := deep.Equal(a.InfrastructureReady, b.InfrastructureReady); len(diffInfrastructureReady) > 0 { + diff[".infrastructureReady"] = diffInfrastructureReady + } + + if diffNodeInfo := deep.Equal(a.NodeInfo, b.NodeInfo); len(diffNodeInfo) > 0 { + diff[".nodeInfo"] = diffNodeInfo + } + + if diffConditions := compareCAPIV1Beta1Conditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { + diff[".conditions"] = diffConditions + } + + // Compare the v1beta2 fields. 
+ if a.V1Beta2 == nil { + a.V1Beta2 = &clusterv1.MachineV1Beta2Status{} + } + + if b.V1Beta2 == nil { + b.V1Beta2 = &clusterv1.MachineV1Beta2Status{} + } + + if diffConditions := compareCAPIV1Beta2Conditions(a.V1Beta2.Conditions, b.V1Beta2.Conditions); len(diffConditions) > 0 { + diff[".v1beta2.conditions"] = diffConditions + } + return diff } -func compareCAPIMachineSetConditions(a, b []clusterv1.Condition) []string { +// MAPIMachineStatusEqual compares variables a and b, +// and returns a list of differences, or nil if there are none, +// for the fields we care about when synchronising CAPI and MAPI Machines. +func MAPIMachineStatusEqual(a, b mapiv1beta1.MachineStatus) map[string]any { + diff := map[string]any{} + + if diffConditions := compareMAPIV1Beta1Conditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { + diff[".conditions"] = diffConditions + } + + if diffErrorReason := deep.Equal(a.ErrorReason, b.ErrorReason); len(diffErrorReason) > 0 { + diff[".errorReason"] = diffErrorReason + } + + if diffErrorMessage := deep.Equal(a.ErrorMessage, b.ErrorMessage); len(diffErrorMessage) > 0 { + diff[".errorMessage"] = diffErrorMessage + } + + if diffPhase := deep.Equal(a.Phase, b.Phase); len(diffPhase) > 0 { + diff[".phase"] = diffPhase + } + + if diffAddresses := deep.Equal(a.Addresses, b.Addresses); len(diffAddresses) > 0 { + diff[".addresses"] = diffAddresses + } + + if diffNodeRef := deep.Equal(a.NodeRef, b.NodeRef); len(diffNodeRef) > 0 { + diff[".nodeRef"] = diffNodeRef + } + + if diffLastUpdated := deep.Equal(a.LastUpdated, b.LastUpdated); len(diffLastUpdated) > 0 { + diff[".lastUpdated"] = diffLastUpdated + } + + return diff +} + +// compareCAPIV1Beta1Conditions compares variables a and b, +// and returns a list of differences, or nil if there are none, +// for the fields we care about when synchronising CAPI v1beta1 and MAPI. 
+func compareCAPIV1Beta1Conditions(a, b []clusterv1.Condition) []string { diff := []string{} // Compare the conditions one by one. // Ignore the differences in LastTransitionTime. @@ -133,7 +256,10 @@ func compareCAPIMachineSetConditions(a, b []clusterv1.Condition) []string { return diff } -func compareMAPIMachineSetConditions(a, b []mapiv1beta1.Condition) []string { +// compareMAPIV1Beta1Conditions compares variables a and b, +// and returns a list of differences, or nil if there are none, +// for the fields we care about when synchronising MAPI v1beta1 and CAPI. +func compareMAPIV1Beta1Conditions(a, b []mapiv1beta1.Condition) []string { diff := []string{} // Compare the conditions one by one. // Ignore the differences in LastTransitionTime. @@ -155,16 +281,36 @@ func compareMAPIMachineSetConditions(a, b []mapiv1beta1.Condition) []string { return diff } +// compareCAPIV1Beta2Conditions compares variables a and b, +// and returns a list of differences, or nil if there are none, +// for the fields we care about when synchronising CAPI v1beta2 and MAPI. +func compareCAPIV1Beta2Conditions(a, b []metav1.Condition) []string { + diff := []string{} + // Compare the conditions one by one. + // Ignore the differences in LastTransitionTime. + for _, condition := range a { + for _, conditionB := range b { + if condition.Type == conditionB.Type { + if condition.Status != conditionB.Status || + condition.Reason != conditionB.Reason || + condition.Message != conditionB.Message { + diff = append(diff, deep.Equal(condition, conditionB)...) + } + + break // Break out of the inner loop once we have found a match. + } + } + } + + return diff +} + // MAPIMachineSetStatusEqual compares variables a and b, // and returns a list of differences, or nil if there are none, // for the fields we care about when synchronising MAPI and CAPI Machines. 
-func MAPIMachineSetStatusEqual(a, b mapiv1beta1.MachineSetStatus) map[string]any { //nolint:dupl +func MAPIMachineSetStatusEqual(a, b mapiv1beta1.MachineSetStatus) map[string]any { diff := map[string]any{} - if diffConditions := compareMAPIMachineSetConditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { - diff[".conditions"] = diffConditions - } - if diffReadyReplicas := deep.Equal(a.ReadyReplicas, b.ReadyReplicas); len(diffReadyReplicas) > 0 { diff[".readyReplicas"] = diffReadyReplicas } @@ -185,6 +331,10 @@ func MAPIMachineSetStatusEqual(a, b mapiv1beta1.MachineSetStatus) map[string]any diff[".errorMessage"] = diffErrorMessage } + if diffConditions := compareMAPIV1Beta1Conditions(a.Conditions, b.Conditions); len(diffConditions) > 0 { + diff[".conditions"] = diffConditions + } + return diff }