diff --git a/Makefile b/Makefile
index d0eb30e34..a075cc3f1 100644
--- a/Makefile
+++ b/Makefile
@@ -7,11 +7,13 @@ HUB_AGENT_IMAGE_VERSION ?= $(TAG)
 MEMBER_AGENT_IMAGE_VERSION ?= $(TAG)
 REFRESH_TOKEN_IMAGE_VERSION ?= $(TAG)
 CRD_INSTALLER_IMAGE_VERSION ?= $(TAG)
+CRD_CLEANUP_IMAGE_VERSION ?= $(TAG)
 
 HUB_AGENT_IMAGE_NAME ?= hub-agent
 MEMBER_AGENT_IMAGE_NAME ?= member-agent
 REFRESH_TOKEN_IMAGE_NAME ?= refresh-token
 CRD_INSTALLER_IMAGE_NAME ?= crd-installer
+CRD_CLEANUP_IMAGE_NAME ?= crd-cleanup
 ARC_MEMBER_AGENT_HELMCHART_NAME = arc-member-cluster-agents-helm-chart
 
 TARGET_OS ?= linux
@@ -256,6 +258,7 @@ build: generate fmt vet ## Build agent binaries
 	go build -o bin/hubagent cmd/hubagent/main.go
 	go build -o bin/memberagent cmd/memberagent/main.go
 	go build -o bin/crdinstaller cmd/crdinstaller/main.go
+	go build -o bin/crdcleanup cmd/crdcleanup/main.go
 
 .PHONY: run-hubagent
 run-hubagent: manifests generate fmt vet ## Run hub-agent from your host
@@ -280,7 +283,7 @@ BUILDKIT_VERSION ?= v0.18.1
 
 .PHONY: push
 push: ## Build and push all Docker images
-	$(MAKE) OUTPUT_TYPE="type=registry" docker-build-hub-agent docker-build-member-agent docker-build-refresh-token docker-build-crd-installer
+	$(MAKE) OUTPUT_TYPE="type=registry" docker-build-hub-agent docker-build-member-agent docker-build-refresh-token docker-build-crd-installer docker-build-crd-cleanup
 
 # By default, docker buildx create will pull image moby/buildkit:buildx-stable-1 and hit the too many requests error
 #
@@ -360,6 +363,18 @@ docker-build-crd-installer: docker-buildx-builder
 		--build-arg GOARCH=$(TARGET_ARCH) \
 		--build-arg GOOS=${TARGET_OS} .
 
+.PHONY: docker-build-crd-cleanup
+docker-build-crd-cleanup: docker-buildx-builder ## Build crd-cleanup image
+	docker buildx build \
+		--file docker/crd-cleanup.Dockerfile \
+		--output=$(OUTPUT_TYPE) \
+		--platform=$(TARGET_OS)/$(TARGET_ARCH) \
+		--pull \
+		--tag $(REGISTRY)/$(CRD_CLEANUP_IMAGE_NAME):$(CRD_CLEANUP_IMAGE_VERSION) \
+		--progress=$(BUILDKIT_PROGRESS_TYPE) \
+		--build-arg GOARCH=$(TARGET_ARCH) \
+		--build-arg GOOS=${TARGET_OS} .
+
 # Fleet Agents and Networking Agents are packaged and pushed to MCR for Arc Extension.
 .PHONY: helm-package-arc-member-cluster-agents
 helm-package-arc-member-cluster-agents:
diff --git a/charts/hub-agent/templates/crd-cleanup-job.yaml b/charts/hub-agent/templates/crd-cleanup-job.yaml
new file mode 100644
index 000000000..e2b8de959
--- /dev/null
+++ b/charts/hub-agent/templates/crd-cleanup-job.yaml
@@ -0,0 +1,39 @@
+{{- if and .Values.crdCleanup.enabled .Values.enableV1Beta1APIs }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "hub-agent.fullname" . }}-crd-cleanup
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "hub-agent.labels" . | nindent 4 }}
+  annotations:
+    # Run this job before helm deletes other resources
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "-5"
+    "helm.sh/hook-delete-policy": hook-succeeded,hook-failed
+spec:
+  ttlSecondsAfterFinished: 60
+  activeDeadlineSeconds: 300
+  backoffLimit: 2
+  template:
+    metadata:
+      labels:
+        {{- include "hub-agent.selectorLabels" . | nindent 8 }}
+    spec:
+      serviceAccountName: {{ include "hub-agent.fullname" . }}-sa
+      restartPolicy: Never
+      containers:
+        - name: crd-cleanup
+          image: "{{ .Values.crdCleanup.image.repository }}:{{ .Values.crdCleanup.image.tag }}"
+          imagePullPolicy: {{ .Values.crdCleanup.image.pullPolicy }}
+          args:
+            - --mode=hub
+            - --v={{ .Values.crdCleanup.logVerbosity }}
+          resources:
+            limits:
+              cpu: 50m
+              memory: 64Mi
+            requests:
+              cpu: 10m
+              memory: 32Mi
+{{- end }}
diff --git a/charts/hub-agent/values.yaml b/charts/hub-agent/values.yaml
index ed522322d..80b7ab0a2 100644
--- a/charts/hub-agent/values.yaml
+++ b/charts/hub-agent/values.yaml
@@ -19,6 +19,16 @@ crdInstaller:
     tag: main
   logVerbosity: 2
 
+# CRD cleanup job configuration.
+# This job cleans up CRDs when the helm chart is uninstalled.
+crdCleanup:
+  enabled: false
+  image:
+    repository: ghcr.io/azure/fleet/crd-cleanup
+    pullPolicy: Always
+    tag: main
+  logVerbosity: 2
+
 logVerbosity: 5
 
 enableWebhook: true
diff --git a/charts/member-agent/templates/crd-cleanup-job.yaml b/charts/member-agent/templates/crd-cleanup-job.yaml
new file mode 100644
index 000000000..323523bf4
--- /dev/null
+++ b/charts/member-agent/templates/crd-cleanup-job.yaml
@@ -0,0 +1,39 @@
+{{- if and .Values.crdCleanup.enabled .Values.enableV1Beta1APIs }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "member-agent.fullname" . }}-crd-cleanup
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "member-agent.labels" . | nindent 4 }}
+  annotations:
+    # Run this job before helm deletes other resources
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "-5"
+    "helm.sh/hook-delete-policy": hook-succeeded,hook-failed
+spec:
+  ttlSecondsAfterFinished: 60
+  activeDeadlineSeconds: 300
+  backoffLimit: 2
+  template:
+    metadata:
+      labels:
+        {{- include "member-agent.selectorLabels" . | nindent 8 }}
+    spec:
+      serviceAccountName: {{ include "member-agent.fullname" . }}-sa
+      restartPolicy: Never
+      containers:
+        - name: crd-cleanup
+          image: "{{ .Values.crdCleanup.image.repository }}:{{ .Values.crdCleanup.image.tag }}"
+          imagePullPolicy: {{ .Values.crdCleanup.image.pullPolicy }}
+          args:
+            - --mode=member
+            - --v={{ .Values.crdCleanup.logVerbosity }}
+          resources:
+            limits:
+              cpu: 50m
+              memory: 64Mi
+            requests:
+              cpu: 10m
+              memory: 32Mi
+{{- end }}
diff --git a/charts/member-agent/values.yaml b/charts/member-agent/values.yaml
index db806ee92..48b22bf16 100644
--- a/charts/member-agent/values.yaml
+++ b/charts/member-agent/values.yaml
@@ -14,6 +14,16 @@ crdInstaller:
     tag: main
   logVerbosity: 2
 
+# CRD cleanup job configuration.
+# This job cleans up CRDs when the helm chart is uninstalled.
+crdCleanup:
+  enabled: false
+  image:
+    repository: ghcr.io/azure/fleet/crd-cleanup
+    pullPolicy: Always
+    tag: main
+  logVerbosity: 2
+
 logVerbosity: 5
 
 refreshtoken:
diff --git a/cmd/crdcleanup/main.go b/cmd/crdcleanup/main.go
new file mode 100644
index 000000000..083ec0e2e
--- /dev/null
+++ b/cmd/crdcleanup/main.go
@@ -0,0 +1,107 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+// Package main contains the CRD cleanup job for KubeFleet.
+// This job cleans up all CRDs that were installed by the CRD installer
+// when the Fleet agents are uninstalled via Helm pre-delete hook.
+package main
+
+import (
+	"context"
+	"flag"
+	"os"
+
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"go.goms.io/fleet/cmd/crdinstaller/utils"
+)
+
+var mode = flag.String("mode", "", "Mode to run in: 'hub' or 'member' (required)")
+
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+
+	// Validate required flags.
+	if *mode != "hub" && *mode != "member" {
+		klog.Fatal("--mode flag must be either 'hub' or 'member'")
+	}
+
+	klog.Infof("Starting CRD cleanup job in %s mode", *mode)
+
+	// Print all flags for debugging.
+	flag.VisitAll(func(f *flag.Flag) {
+		klog.V(2).InfoS("flag:", "name", f.Name, "value", f.Value)
+	})
+
+	// Get Kubernetes config using controller-runtime.
+	config := ctrl.GetConfigOrDie()
+
+	// Create a scheme that knows about CRD types.
+	scheme := runtime.NewScheme()
+	if err := apiextensionsv1.AddToScheme(scheme); err != nil {
+		klog.Fatalf("Failed to add apiextensions scheme: %v", err)
+	}
+
+	k8sClient, err := client.New(config, client.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		klog.Fatalf("Failed to create Kubernetes client: %v", err)
+	}
+
+	// Create context for cleanup operations.
+	ctx := context.Background()
+
+	// Perform cleanup.
+	if err := cleanupCRDs(ctx, k8sClient, *mode); err != nil {
+		klog.Errorf("Failed to cleanup CRDs: %v", err)
+		os.Exit(1)
+	}
+
+	klog.Info("CRD cleanup completed successfully")
+}
+
+// cleanupCRDs deletes all CRDs that were installed by the CRD installer for the given mode.
+// It uses the mode label to identify which CRDs to delete.
+func cleanupCRDs(ctx context.Context, k8sClient client.Client, mode string) error {
+	// List all CRDs with both the managed label and the matching mode label.
+	crdList := &apiextensionsv1.CustomResourceDefinitionList{}
+	labelSelector := labels.SelectorFromSet(labels.Set{
+		utils.CRDInstallerLabelKey:  "true",
+		utils.CRDInstallerModeLabel: mode,
+	})
+
+	if err := k8sClient.List(ctx, crdList, &client.ListOptions{
+		LabelSelector: labelSelector,
+	}); err != nil {
+		return err
+	}
+
+	klog.Infof("Found %d CRDs to cleanup for mode %s", len(crdList.Items), mode)
+
+	// Delete all matching CRDs.
+	var deletedCount int
+	for i := range crdList.Items {
+		crd := &crdList.Items[i]
+
+		klog.Infof("Deleting CRD: %s", crd.Name)
+		if err := k8sClient.Delete(ctx, crd); err != nil {
+			klog.Errorf("Failed to delete CRD %s: %v", crd.Name, err)
+			// Continue with other CRDs even if one fails.
+			continue
+		}
+		deletedCount++
+		klog.Infof("Successfully deleted CRD: %s", crd.Name)
+	}
+
+	klog.Infof("Cleanup complete: deleted %d CRDs", deletedCount)
+	return nil
+}
diff --git a/cmd/crdcleanup/main_test.go b/cmd/crdcleanup/main_test.go
new file mode 100644
index 000000000..7c5b8e778
--- /dev/null
+++ b/cmd/crdcleanup/main_test.go
@@ -0,0 +1,208 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package main
+
+import (
+	"context"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	"go.goms.io/fleet/cmd/crdinstaller/utils"
+)
+
+var lessFunc = func(s1, s2 string) bool {
+	return s1 < s2
+}
+
+func TestCleanupCRDs(t *testing.T) {
+	scheme := runtime.NewScheme()
+	if err := apiextensionsv1.AddToScheme(scheme); err != nil {
+		t.Fatalf("Failed to add apiextensions scheme: %v", err)
+	}
+
+	// Create test CRDs with mode labels.
+	hubCRD1 := &apiextensionsv1.CustomResourceDefinition{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "memberclusters.cluster.kubernetes-fleet.io",
+			Labels: map[string]string{
+				utils.CRDInstallerLabelKey:  "true",
+				utils.CRDInstallerModeLabel: "hub",
+			},
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: "cluster.kubernetes-fleet.io",
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Plural:   "memberclusters",
+				Singular: "membercluster",
+				Kind:     "MemberCluster",
+			},
+			Scope: apiextensionsv1.ClusterScoped,
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{Name: "v1beta1", Served: true, Storage: true, Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{Type: "object"},
+				}},
+			},
+		},
+	}
+
+	hubCRD2 := &apiextensionsv1.CustomResourceDefinition{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "clusterprofiles.multicluster.x-k8s.io",
+			Labels: map[string]string{
+				utils.CRDInstallerLabelKey:  "true",
+				utils.CRDInstallerModeLabel: "hub",
+			},
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: "multicluster.x-k8s.io",
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Plural:   "clusterprofiles",
+				Singular: "clusterprofile",
+				Kind:     "ClusterProfile",
+			},
+			Scope: apiextensionsv1.ClusterScoped,
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{Name: "v1alpha1", Served: true, Storage: true, Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{Type: "object"},
+				}},
+			},
+		},
+	}
+
+	memberCRD := &apiextensionsv1.CustomResourceDefinition{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "appliedworks.placement.kubernetes-fleet.io",
+			Labels: map[string]string{
+				utils.CRDInstallerLabelKey:  "true",
+				utils.CRDInstallerModeLabel: "member",
+			},
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: "placement.kubernetes-fleet.io",
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Plural:   "appliedworks",
+				Singular: "appliedwork",
+				Kind:     "AppliedWork",
+			},
+			Scope: apiextensionsv1.NamespaceScoped,
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{Name: "v1beta1", Served: true, Storage: true, Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{Type: "object"},
+				}},
+			},
+		},
+	}
+
+	unmanagedCRD := &apiextensionsv1.CustomResourceDefinition{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "unmanaged.example.com",
+			// No managed label or mode label
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: "example.com",
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Plural:   "unmanageds",
+				Singular: "unmanaged",
+				Kind:     "Unmanaged",
+			},
+			Scope: apiextensionsv1.NamespaceScoped,
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{Name: "v1", Served: true, Storage: true, Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{Type: "object"},
+				}},
+			},
+		},
+	}
+
+	tests := []struct {
+		name              string
+		mode              string
+		existingCRDs      []apiextensionsv1.CustomResourceDefinition
+		wantRemainingCRDs []string
+		wantError         bool
+	}{
+		{
+			name: "hub mode - deletes only hub-labeled CRDs",
+			mode: "hub",
+			existingCRDs: []apiextensionsv1.CustomResourceDefinition{
+				*hubCRD1,
+				*hubCRD2,
+				*memberCRD,
+				*unmanagedCRD,
+			},
+			// After hub cleanup: member CRD and unmanaged CRD should remain
+			wantRemainingCRDs: []string{
+				"appliedworks.placement.kubernetes-fleet.io",
+				"unmanaged.example.com",
+			},
+			wantError: false,
+		},
+		{
+			name: "member mode - deletes only member-labeled CRDs",
+			mode: "member",
+			existingCRDs: []apiextensionsv1.CustomResourceDefinition{
+				*hubCRD1,
+				*memberCRD,
+				*unmanagedCRD,
+			},
+			// After member cleanup: hub CRD and unmanaged CRD should remain
+			wantRemainingCRDs: []string{
+				"memberclusters.cluster.kubernetes-fleet.io",
+				"unmanaged.example.com",
+			},
+			wantError: false,
+		},
+		{
+			name:              "no CRDs to cleanup",
+			mode:              "hub",
+			existingCRDs:      []apiextensionsv1.CustomResourceDefinition{*unmanagedCRD},
+			wantRemainingCRDs: []string{"unmanaged.example.com"},
+			wantError:         false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Build fake client with existing CRDs.
+			objs := make([]runtime.Object, len(tt.existingCRDs))
+			for i := range tt.existingCRDs {
+				objs[i] = &tt.existingCRDs[i]
+			}
+			fakeClient := fake.NewClientBuilder().
+				WithScheme(scheme).
+				WithRuntimeObjects(objs...).
+				Build()
+
+			// Run cleanup.
+			err := cleanupCRDs(context.Background(), fakeClient, tt.mode)
+			if (err != nil) != tt.wantError {
+				t.Errorf("cleanupCRDs() error = %v, wantError %v", err, tt.wantError)
+				return
+			}
+
+			// Verify remaining CRDs.
+			var remainingCRDs apiextensionsv1.CustomResourceDefinitionList
+			if err := fakeClient.List(context.Background(), &remainingCRDs); err != nil {
+				t.Fatalf("Failed to list remaining CRDs: %v", err)
+			}
+
+			gotNames := make([]string, len(remainingCRDs.Items))
+			for i, crd := range remainingCRDs.Items {
+				gotNames[i] = crd.Name
+			}
+
+			if diff := cmp.Diff(tt.wantRemainingCRDs, gotNames, cmpopts.SortSlices(lessFunc)); diff != "" {
+				t.Errorf("Remaining CRDs mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
diff --git a/cmd/crdinstaller/main.go b/cmd/crdinstaller/main.go
index ba945f981..25d71728e 100644
--- a/cmd/crdinstaller/main.go
+++ b/cmd/crdinstaller/main.go
@@ -87,9 +87,9 @@ func installCRDs(ctx context.Context, client client.Client, crdPath, mode string
 	klog.Infof("Found %d CRDs to install for mode %s", len(crdsToInstall), mode)
 
-	// Install each CRD.
+	// Install each CRD with the mode label.
 	for i := range crdsToInstall {
-		if err := utils.InstallCRD(ctx, client, &crdsToInstall[i]); err != nil {
+		if err := utils.InstallCRD(ctx, client, &crdsToInstall[i], mode); err != nil {
 			return err
 		}
 	}
 
diff --git a/cmd/crdinstaller/utils/util.go b/cmd/crdinstaller/utils/util.go
index 2ff94dc22..ac4e44091 100644
--- a/cmd/crdinstaller/utils/util.go
+++ b/cmd/crdinstaller/utils/util.go
@@ -26,6 +26,8 @@ import (
 const (
 	// CRDInstallerLabelKey is the label key used to indicate that a CRD is managed by the installer.
 	CRDInstallerLabelKey = "crd-installer.azurefleet.io/managed"
+	// CRDInstallerModeLabel is the label key used to indicate the mode (hub/member) that installed the CRD.
+	CRDInstallerModeLabel = "crd-installer.azurefleet.io/mode"
 	// AzureManagedLabelKey is the label key used to indicate that a CRD is managed by an azure resource.
 	AzureManagedLabelKey = "kubernetes.azure.com/managedby"
 	// FleetLabelValue is the value for the AzureManagedLabelKey indicating management by Fleet.
@@ -42,8 +44,9 @@ var (
 )
 
 // InstallCRD creates/updates a Custom Resource Definition (CRD) from the provided CRD object.
-func InstallCRD(ctx context.Context, client client.Client, crd *apiextensionsv1.CustomResourceDefinition) error {
-	klog.V(2).Infof("Installing CRD: %s", crd.Name)
+// The mode parameter ("hub" or "member") is used to label the CRD for cleanup identification.
+func InstallCRD(ctx context.Context, client client.Client, crd *apiextensionsv1.CustomResourceDefinition, mode string) error {
+	klog.V(2).Infof("Installing CRD: %s (mode: %s)", crd.Name, mode)
 
 	existingCRD := apiextensionsv1.CustomResourceDefinition{
 		ObjectMeta: metav1.ObjectMeta{
@@ -55,12 +58,14 @@ func InstallCRD(ctx context.Context, client client.Client, crd *apiextensionsv1.
 		// Copy spec from our decoded CRD to the object we're creating/updating.
 		existingCRD.Spec = crd.Spec
 
-		// Add an additional ownership label to indicate this CRD is managed by the installer.
+		// Add labels to indicate this CRD is managed by the installer.
 		if existingCRD.Labels == nil {
 			existingCRD.Labels = make(map[string]string)
 		}
 		// Ensure the label for management by the installer is set.
 		existingCRD.Labels[CRDInstallerLabelKey] = "true"
+		// Set the mode label to identify which agent (hub/member) installed this CRD.
+		existingCRD.Labels[CRDInstallerModeLabel] = mode
 		// Also set the Azure managed label to indicate this is managed by Fleet,
 		// needed for clean up of CRD by kube-addon-manager.
 		existingCRD.Labels[AzureManagedLabelKey] = FleetLabelValue
diff --git a/cmd/crdinstaller/utils/util_test.go b/cmd/crdinstaller/utils/util_test.go
index 22f474b7d..34bda32b7 100644
--- a/cmd/crdinstaller/utils/util_test.go
+++ b/cmd/crdinstaller/utils/util_test.go
@@ -157,11 +157,19 @@ func TestInstallCRD(t *testing.T) {
 	tests := []struct {
 		name      string
 		crd       *apiextensionsv1.CustomResourceDefinition
+		mode      string
 		wantError bool
 	}{
 		{
-			name:      "successful CRD installation",
+			name:      "successful CRD installation in hub mode",
 			crd:       testCRD,
+			mode:      "hub",
+			wantError: false,
+		},
+		{
+			name:      "successful CRD installation in member mode",
+			crd:       testCRD,
+			mode:      "member",
 			wantError: false,
 		},
 	}
@@ -169,7 +177,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
-			err := InstallCRD(context.Background(), fakeClient, tt.crd)
+			err := InstallCRD(context.Background(), fakeClient, tt.crd, tt.mode)
 
 			if tt.wantError {
 				if err == nil {
@@ -194,6 +202,10 @@
 				t.Errorf("Expected CRD label %s to be 'true', got %q", CRDInstallerLabelKey, installedCRD.Labels[CRDInstallerLabelKey])
 			}
 
+			if installedCRD.Labels[CRDInstallerModeLabel] != tt.mode {
+				t.Errorf("Expected CRD label %s to be %q, got %q", CRDInstallerModeLabel, tt.mode, installedCRD.Labels[CRDInstallerModeLabel])
+			}
+
 			if installedCRD.Labels[AzureManagedLabelKey] != FleetLabelValue {
 				t.Errorf("Expected CRD label %s to be %q, got %q", AzureManagedLabelKey, FleetLabelValue, installedCRD.Labels[AzureManagedLabelKey])
 			}
diff --git a/docker/crd-cleanup.Dockerfile b/docker/crd-cleanup.Dockerfile
new file mode 100644
index 000000000..d4c689c1d
--- /dev/null
+++ b/docker/crd-cleanup.Dockerfile
@@ -0,0 +1,32 @@
+# Build the crdcleanup binary
+FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.9 AS builder
+
+ARG GOOS=linux
+ARG GOARCH=amd64
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY cmd/crdcleanup/ cmd/crdcleanup/
+COPY cmd/crdinstaller/utils/ cmd/crdinstaller/utils/
+COPY pkg pkg
+
+# Build with CGO enabled and GOEXPERIMENT=systemcrypto for internal usage
+RUN echo "Building for GOOS=$GOOS GOARCH=$GOARCH"
+RUN CGO_ENABLED=1 GOOS=$GOOS GOARCH=$GOARCH GOEXPERIMENT=systemcrypto GO111MODULE=on go build -o crdcleanup cmd/crdcleanup/main.go
+
+# Use Azure Linux distroless base image to package the crdcleanup binary
+# Refer to https://mcr.microsoft.com/en-us/artifact/mar/azurelinux/distroless/base/about for more details
+FROM mcr.microsoft.com/azurelinux/distroless/base:3.0
+WORKDIR /
+COPY --from=builder /workspace/crdcleanup .
+
+USER 65532:65532
+
+ENTRYPOINT ["/crdcleanup"]
diff --git a/go.mod b/go.mod
index c8524cc40..77c698443 100644
--- a/go.mod
+++ b/go.mod
@@ -42,6 +42,7 @@ require (
 	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.20
 	sigs.k8s.io/cluster-inventory-api v0.0.0-20251028164203-2e3fabb46733
 	sigs.k8s.io/controller-runtime v0.22.4
+	sigs.k8s.io/yaml v1.6.0
 )
 
 require (
@@ -134,7 +135,6 @@
 	sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
-	sigs.k8s.io/yaml v1.6.0 // indirect
 )
 
 replace (
diff --git a/test/crdinstaller/crd_installer_integration_test.go b/test/crdinstaller/crd_installer_integration_test.go
index 7aee4a5d0..e2f57b7fe 100644
--- a/test/crdinstaller/crd_installer_integration_test.go
+++ b/test/crdinstaller/crd_installer_integration_test.go
@@ -23,6 +23,7 @@ const (
 	originalCRDPath = "./original_crds/test.kubernetes-fleet.io_testresources.yaml"
 	updatedCRDPath  = "./updated_crds/test.kubernetes-fleet.io_testresources.yaml"
 	randomLabelKey  = "random-label.io"
+	testMode        = "test"
 )
 const (
 	eventuallyDuration = time.Minute * 1
@@ -36,13 +37,14 @@ var _ = Describe("Test CRD Installer, Create and Update CRD", Ordered, func() {
 	It("should create original CRD", func() {
 		crd, err := cmdCRDInstaller.GetCRDFromPath(originalCRDPath, scheme)
 		Expect(err).NotTo(HaveOccurred(), "should get CRD from path %s", originalCRDPath)
-		Expect(cmdCRDInstaller.InstallCRD(ctx, k8sClient, crd)).To(Succeed())
+		Expect(cmdCRDInstaller.InstallCRD(ctx, k8sClient, crd, testMode)).To(Succeed())
 	})
 
 	It("should verify original CRD installation", func() {
 		ensureCRDExistsWithLabels(map[string]string{
-			cmdCRDInstaller.CRDInstallerLabelKey: "true",
-			cmdCRDInstaller.AzureManagedLabelKey: cmdCRDInstaller.FleetLabelValue,
+			cmdCRDInstaller.CRDInstallerLabelKey:  "true",
+			cmdCRDInstaller.CRDInstallerModeLabel: testMode,
+			cmdCRDInstaller.AzureManagedLabelKey:  cmdCRDInstaller.FleetLabelValue,
 		})
 		crd := &apiextensionsv1.CustomResourceDefinition{}
 		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crdName}, crd)).NotTo(HaveOccurred(), "CRD %s should be installed", crdName)
@@ -61,15 +63,16 @@ var _ = Describe("Test CRD Installer, Create and Update CRD", Ordered, func() {
 	It("should update the CRD with new field in spec with crdinstaller label", func() {
 		crd, err := cmdCRDInstaller.GetCRDFromPath(updatedCRDPath, scheme)
 		Expect(err).NotTo(HaveOccurred(), "should get CRD from path %s", updatedCRDPath)
-		Expect(cmdCRDInstaller.InstallCRD(ctx, k8sClient, crd)).To(Succeed())
+		Expect(cmdCRDInstaller.InstallCRD(ctx, k8sClient, crd, testMode)).To(Succeed())
 	})
 
 	It("should verify updated CRD", func() {
 		// ensure we don't overwrite the random label.
 		ensureCRDExistsWithLabels(map[string]string{
-			randomLabelKey:                       "true",
-			cmdCRDInstaller.CRDInstallerLabelKey: "true",
-			cmdCRDInstaller.AzureManagedLabelKey: cmdCRDInstaller.FleetLabelValue,
+			randomLabelKey:                        "true",
+			cmdCRDInstaller.CRDInstallerLabelKey:  "true",
+			cmdCRDInstaller.CRDInstallerModeLabel: testMode,
+			cmdCRDInstaller.AzureManagedLabelKey:  cmdCRDInstaller.FleetLabelValue,
 		})
 		crd := &apiextensionsv1.CustomResourceDefinition{}
 		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crdName}, crd)).NotTo(HaveOccurred(), "CRD %s should be installed", crdName)
diff --git a/test/e2e/setup.sh b/test/e2e/setup.sh
index f53d4d3a8..bf0928791 100755
--- a/test/e2e/setup.sh
+++ b/test/e2e/setup.sh
@@ -25,6 +25,7 @@ export HUB_AGENT_IMAGE="${HUB_AGENT_IMAGE:-hub-agent}"
 export MEMBER_AGENT_IMAGE="${MEMBER_AGENT_IMAGE:-member-agent}"
 export REFRESH_TOKEN_IMAGE="${REFRESH_TOKEN_IMAGE:-refresh-token}"
 export CRD_INSTALLER_IMAGE="${CRD_INSTALLER_IMAGE:-crd-installer}"
+export CRD_CLEANUP_IMAGE="${CRD_CLEANUP_IMAGE:-crd-cleanup}"
 export PROPERTY_PROVIDER="${PROPERTY_PROVIDER:-azure}"
 export USE_PREDEFINED_REGIONS="${USE_PREDEFINED_REGIONS:-false}"
 export RESOURCE_SNAPSHOT_CREATION_MINIMUM_INTERVAL="${RESOURCE_SNAPSHOT_CREATION_MINIMUM_INTERVAL:-0m}"
@@ -102,18 +103,21 @@ make -C "../.." docker-build-hub-agent
 make -C "../.." docker-build-member-agent
 make -C "../.." docker-build-refresh-token
 make -C "../.." docker-build-crd-installer
+make -C "../.." docker-build-crd-cleanup
 
 # Load the Fleet agent images into the kind clusters
 # Load the hub agent image into the hub cluster
 kind load docker-image --name $HUB_CLUSTER $REGISTRY/$HUB_AGENT_IMAGE:$TAG
 kind load docker-image --name $HUB_CLUSTER $REGISTRY/$CRD_INSTALLER_IMAGE:$TAG
+kind load docker-image --name $HUB_CLUSTER $REGISTRY/$CRD_CLEANUP_IMAGE:$TAG
 
 # Load the member agent image and the refresh token image into the member clusters
 for i in "${MEMBER_CLUSTERS[@]}"
 do
 	kind load docker-image --name "$i" $REGISTRY/$MEMBER_AGENT_IMAGE:$TAG
 	kind load docker-image --name "$i" $REGISTRY/$CRD_INSTALLER_IMAGE:$TAG
+	kind load docker-image --name "$i" $REGISTRY/$CRD_CLEANUP_IMAGE:$TAG
 	kind load docker-image --name "$i" $REGISTRY/$REFRESH_TOKEN_IMAGE:$TAG
 done
 
@@ -144,9 +148,13 @@ helm install hub-agent ../../charts/hub-agent/ \
     --set crdInstaller.image.repository=$REGISTRY/$CRD_INSTALLER_IMAGE \
    --set crdInstaller.image.tag=$TAG \
    --set crdInstaller.image.pullPolicy=Never \
+    --set crdCleanup.enabled=true \
+    --set crdCleanup.image.repository=$REGISTRY/$CRD_CLEANUP_IMAGE \
+    --set crdCleanup.image.tag=$TAG \
+    --set crdCleanup.image.pullPolicy=Never \
    --set namespace=fleet-system \
    --set logVerbosity=5 \
-    --set replicaCount=3 \
+    --set replicaCount=1 \
    --set useCertManager=true \
    --set webhookCertSecretName=fleet-webhook-server-cert \
    --set enableWebhook=true \
@@ -218,6 +226,10 @@ do
    --set crdInstaller.image.repository=$REGISTRY/$CRD_INSTALLER_IMAGE \
    --set crdInstaller.image.tag=$TAG \
    --set crdInstaller.image.pullPolicy=Never \
+    --set crdCleanup.enabled=true \
+    --set crdCleanup.image.repository=$REGISTRY/$CRD_CLEANUP_IMAGE \
+    --set crdCleanup.image.tag=$TAG \
+    --set crdCleanup.image.pullPolicy=Never \
    --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \
    --set logVerbosity=5 \
    --set namespace=fleet-system \
@@ -238,6 +250,10 @@ do
    --set crdInstaller.image.repository=$REGISTRY/$CRD_INSTALLER_IMAGE \
    --set crdInstaller.image.tag=$TAG \
    --set crdInstaller.image.pullPolicy=Never \
+    --set crdCleanup.enabled=true \
+    --set crdCleanup.image.repository=$REGISTRY/$CRD_CLEANUP_IMAGE \
+    --set crdCleanup.image.tag=$TAG \
+    --set crdCleanup.image.pullPolicy=Never \
    --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \
    --set logVerbosity=5 \
    --set namespace=fleet-system \
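
# Illustrative smoke test only, not part of the patch. The release name (hub-agent),
# namespace (fleet-system), and hub mode below follow test/e2e/setup.sh above; adjust
# them for your environment, and note that the pre-delete hook only runs when
# crdCleanup.enabled=true and enableV1Beta1APIs is set.

# CRDs the cleanup job will target on the hub cluster (labels stamped by the updated installer).
kubectl get crds -l crd-installer.azurefleet.io/managed=true,crd-installer.azurefleet.io/mode=hub

# Uninstalling the chart fires the pre-delete hook, which runs the crd-cleanup Job
# before the rest of the release is removed.
helm uninstall hub-agent -n fleet-system

# Hub-mode CRDs should now be gone; CRDs without the mode label are left untouched.
kubectl get crds -l crd-installer.azurefleet.io/mode=hub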