diff --git a/examples/builtins/main.go b/examples/builtins/main.go index 8ea173b248..c791b0bf10 100644 --- a/examples/builtins/main.go +++ b/examples/builtins/main.go @@ -59,14 +59,17 @@ func main() { } // Watch ReplicaSets and enqueue ReplicaSet object key - if err := c.Watch(source.Kind(mgr.GetCache(), &appsv1.ReplicaSet{}), &handler.EnqueueRequestForObject{}); err != nil { + if err := c.Watch(source.Kind(mgr.GetCache(), &appsv1.ReplicaSet{}, &handler.EnqueueRequestForObject{})); err != nil { entryLog.Error(err, "unable to watch ReplicaSets") os.Exit(1) } // Watch Pods and enqueue owning ReplicaSet key - if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1.ReplicaSet{}, handler.OnlyControllerOwner())); err != nil { + if err := c.Watch(source.Kind( + mgr.GetCache(), + &corev1.Pod{}, + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1.ReplicaSet{}, handler.OnlyControllerOwner()), + )); err != nil { entryLog.Error(err, "unable to watch Pods") os.Exit(1) } diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go index 1a115f2f7b..8c51fa5ca7 100644 --- a/pkg/builder/controller.go +++ b/pkg/builder/controller.go @@ -30,7 +30,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" - internalsource "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -54,14 +53,15 @@ const ( // Builder builds a Controller. 
type Builder struct { - forInput ForInput - ownsInput []OwnsInput - watchesInput []WatchesInput - mgr manager.Manager - globalPredicates []predicate.Predicate - ctrl controller.Controller - ctrlOptions controller.Options - name string + forInput ForInput + ownsInput []OwnsInput + watchesObjectInput []WatchesObjectInput + watchesSourceInput []WatchesSourceInput + mgr manager.Manager + globalPredicates []predicate.Predicate + ctrl controller.Controller + ctrlOptions controller.Options + name string } // ControllerManagedBy returns a new controller builder that will be started by the provided Manager. @@ -80,7 +80,7 @@ type ForInput struct { // For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / // update events by *reconciling the object*. // This is the equivalent of calling -// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}). +// Watches(source.Kind(mgr.GetCache(), apiType, &handler.EnqueueRequestForObject{})). func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { if blder.forInput.object != nil { blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") @@ -121,10 +121,10 @@ func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { return blder } -// WatchesInput represents the information set by Watches method. -type WatchesInput struct { - src source.Source - eventHandler handler.EventHandler +// WatchesObjectInput represents the information set by Watches method. +type WatchesObjectInput struct { + object client.Object + eventhandler handler.EventHandler predicates []predicate.Predicate objectProjection objectProjection } @@ -133,10 +133,15 @@ type WatchesInput struct { // update events by *reconciling the object* with the given EventHandler. // // This is the equivalent of calling -// WatchesRawSource(source.Kind(cache, object), eventHandler, opts...). 
-func (blder *Builder) Watches(object client.Object, eventHandler handler.EventHandler, opts ...WatchesOption) *Builder { - src := source.Kind(blder.mgr.GetCache(), object) - return blder.WatchesRawSource(src, eventHandler, opts...) +// WatchesRawSource(source.Kind(scheme, object, eventhandler, opts...)). +func (blder *Builder) Watches(object client.Object, eventhandler handler.EventHandler, opts ...WatchesObjectOption) *Builder { + input := WatchesObjectInput{object: object, eventhandler: eventhandler} + for _, opt := range opts { + opt.ApplyToWatchesObject(&input) + } + + blder.watchesObjectInput = append(blder.watchesObjectInput, input) + return blder } // WatchesMetadata is the same as Watches, but forces the internal cache to only watch PartialObjectMetadata. @@ -166,29 +171,30 @@ func (blder *Builder) Watches(object client.Object, eventHandler handler.EventHa // In the first case, controller-runtime will create another cache for the // concrete type on top of the metadata cache; this increases memory // consumption and leads to race conditions as caches are not in sync. -func (blder *Builder) WatchesMetadata(object client.Object, eventHandler handler.EventHandler, opts ...WatchesOption) *Builder { +func (blder *Builder) WatchesMetadata(object client.Object, eventhandler handler.EventHandler, opts ...WatchesObjectOption) *Builder { opts = append(opts, OnlyMetadata) - return blder.Watches(object, eventHandler, opts...) + return blder.Watches(object, eventhandler, opts...) +} + +// WatchesSourceInput represents the information set by Watches method. +type WatchesSourceInput struct { + src source.Source } // WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder. -// Specified predicates are registered only for given source. // // STOP! Consider using For(...), Owns(...), Watches(...), WatchesMetadata(...) instead. 
-// This method is only exposed for more advanced use cases, most users should use one of the higher level functions. -func (blder *Builder) WatchesRawSource(src source.Source, eventHandler handler.EventHandler, opts ...WatchesOption) *Builder { - input := WatchesInput{src: src, eventHandler: eventHandler} - for _, opt := range opts { - opt.ApplyToWatches(&input) - } - - blder.watchesInput = append(blder.watchesInput, input) +// This method is only exposed for more advanced use cases, most users should use higher level functions. +// This method does generally disregard all the global configuration set by the builder. +func (blder *Builder) WatchesRawSource(src source.Source) *Builder { + blder.watchesSourceInput = append(blder.watchesSourceInput, WatchesSourceInput{src: src}) return blder } // WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually // trigger reconciliations. For example, filtering on whether the resource version has changed. // Given predicate is added for all watched objects. +// The predicates are not applied to sources watched with WatchesRawSource(...). // Defaults to the empty list. func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { blder.globalPredicates = append(blder.globalPredicates, p) @@ -272,11 +278,14 @@ func (blder *Builder) doWatch() error { if err != nil { return err } - src := source.Kind(blder.mgr.GetCache(), obj) - hdler := &handler.EnqueueRequestForObject{} allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, blder.forInput.predicates...) 
- if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + src := source.Kind( + blder.mgr.GetCache(), + obj, + handler.WithPredicates(&handler.EnqueueRequestForObject{}, allPredicates...), + ) + if err := blder.ctrl.Watch(src); err != nil { return err } } @@ -290,7 +299,6 @@ func (blder *Builder) doWatch() error { if err != nil { return err } - src := source.Kind(blder.mgr.GetCache(), obj) opts := []handler.OwnerOption{} if !own.matchEveryOwner { opts = append(opts, handler.OnlyControllerOwner()) @@ -302,30 +310,43 @@ func (blder *Builder) doWatch() error { ) allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, own.predicates...) - if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + src := source.Kind( + blder.mgr.GetCache(), + obj, + handler.WithPredicates(hdler, allPredicates...), + ) + if err := blder.ctrl.Watch(src); err != nil { return err } } // Do the watch requests - if len(blder.watchesInput) == 0 && blder.forInput.object == nil { + if len(blder.watchesObjectInput) == 0 && len(blder.watchesSourceInput) == 0 && blder.forInput.object == nil { return errors.New("there are no watches configured, controller will never get triggered. Use For(), Owns() or Watches() to set them up") } - for _, w := range blder.watchesInput { - // If the source of this watch is of type Kind, project it. - if srcKind, ok := w.src.(*internalsource.Kind); ok { - typeForSrc, err := blder.project(srcKind.Type, w.objectProjection) - if err != nil { - return err - } - srcKind.Type = typeForSrc + for _, w := range blder.watchesObjectInput { + obj, err := blder.project(w.object, w.objectProjection) + if err != nil { + return err } + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) 
- if err := blder.ctrl.Watch(w.src, w.eventHandler, allPredicates...); err != nil { + src := source.Kind( + blder.mgr.GetCache(), obj, + handler.WithPredicates(w.eventhandler, allPredicates...), + ) + if err := blder.ctrl.Watch(src); err != nil { return err } } + + for _, w := range blder.watchesSourceInput { + if err := blder.ctrl.Watch(w.src); err != nil { + return err + } + } + return nil } diff --git a/pkg/builder/options.go b/pkg/builder/options.go index 15f66b2a82..1ffc7c5287 100644 --- a/pkg/builder/options.go +++ b/pkg/builder/options.go @@ -34,10 +34,10 @@ type OwnsOption interface { ApplyToOwns(*OwnsInput) } -// WatchesOption is some configuration that modifies options for a watches request. -type WatchesOption interface { - // ApplyToWatches applies this configuration to the given watches options. - ApplyToWatches(*WatchesInput) +// WatchesObjectOption is some configuration that modifies options for a watches request. +type WatchesObjectOption interface { + // ApplyToWatchesObject applies this configuration to the given watches options. + ApplyToWatchesObject(*WatchesObjectInput) } // }}} @@ -66,14 +66,14 @@ func (w Predicates) ApplyToOwns(opts *OwnsInput) { opts.predicates = w.predicates } -// ApplyToWatches applies this configuration to the given WatchesInput options. -func (w Predicates) ApplyToWatches(opts *WatchesInput) { +// ApplyToWatchesObject applies this configuration to the given WatchesInput options. +func (w Predicates) ApplyToWatchesObject(opts *WatchesObjectInput) { opts.predicates = w.predicates } var _ ForOption = &Predicates{} var _ OwnsOption = &Predicates{} -var _ WatchesOption = &Predicates{} +var _ WatchesObjectOption = &Predicates{} // }}} @@ -94,8 +94,8 @@ func (p projectAs) ApplyToOwns(opts *OwnsInput) { opts.objectProjection = objectProjection(p) } -// ApplyToWatches applies this configuration to the given WatchesInput options. 
-func (p projectAs) ApplyToWatches(opts *WatchesInput) { +// ApplyToWatchesObject applies this configuration to the given WatchesObjectInput options. +func (p projectAs) ApplyToWatchesObject(opts *WatchesObjectInput) { opts.objectProjection = objectProjection(p) } @@ -132,9 +132,9 @@ var ( // consumption and leads to race conditions as caches are not in sync. OnlyMetadata = projectAs(projectAsMetadata) - _ ForOption = OnlyMetadata - _ OwnsOption = OnlyMetadata - _ WatchesOption = OnlyMetadata + _ ForOption = OnlyMetadata + _ OwnsOption = OnlyMetadata + _ WatchesObjectOption = OnlyMetadata ) // }}} diff --git a/pkg/cache/informertest/fake_cache.go b/pkg/cache/informertest/fake_cache.go index a1a442316f..1052c25308 100644 --- a/pkg/cache/informertest/fake_cache.go +++ b/pkg/cache/informertest/fake_cache.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" toolscache "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -89,10 +90,7 @@ func (c *FakeInformers) RemoveInformer(ctx context.Context, obj client.Object) e // WaitForCacheSync implements Informers. func (c *FakeInformers) WaitForCacheSync(ctx context.Context) bool { - if c.Synced == nil { - return true - } - return *c.Synced + return ptr.Deref(c.Synced, true) } // FakeInformerFor implements Informers. 
@@ -116,7 +114,7 @@ func (c *FakeInformers) informerFor(gvk schema.GroupVersionKind, _ runtime.Objec return informer, nil } - c.InformersByGVK[gvk] = &controllertest.FakeInformer{} + c.InformersByGVK[gvk] = &controllertest.FakeInformer{Synced: ptr.Deref(c.Synced, true)} return c.InformersByGVK[gvk], nil } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e48db41f94..6c44cc5663 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -25,10 +25,8 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/internal/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" @@ -72,13 +70,15 @@ type Controller interface { // Reconciler is called to reconcile an object by Namespace/Name reconcile.Reconciler - // Watch takes events provided by a Source and uses the EventHandler to - // enqueue reconcile.Requests in response to the events. + // Watch takes events provided by a Source and enqueues reconcile.Requests + // in response to the events. + Watch(src source.Source) error + + // StopWatch stops watching a source that was previously registered by Watch(). // - // Watch may be provided one or more Predicates to filter events before - // they are given to the EventHandler. Events will be passed to the - // EventHandler if all provided Predicates evaluate to true. - Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error + // StopWatch may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + StopWatch(src source.Source) error // Start starts the controller. Start blocks until the context is closed or a // controller has an error starting. 
diff --git a/pkg/controller/controller_integration_test.go b/pkg/controller/controller_integration_test.go index 48facf1e94..aca2f517c6 100644 --- a/pkg/controller/controller_integration_test.go +++ b/pkg/controller/controller_integration_test.go @@ -64,13 +64,14 @@ var _ = Describe("controller", func() { Expect(err).NotTo(HaveOccurred()) By("Watching Resources") - err = instance.Watch( - source.Kind(cm.GetCache(), &appsv1.ReplicaSet{}), + err = instance.Watch(source.Kind( + cm.GetCache(), + &appsv1.ReplicaSet{}, handler.EnqueueRequestForOwner(cm.GetScheme(), cm.GetRESTMapper(), &appsv1.Deployment{}), - ) + )) Expect(err).NotTo(HaveOccurred()) - err = instance.Watch(source.Kind(cm.GetCache(), &appsv1.Deployment{}), &handler.EnqueueRequestForObject{}) + err = instance.Watch(source.Kind(cm.GetCache(), &appsv1.Deployment{}, &handler.EnqueueRequestForObject{})) Expect(err).NotTo(HaveOccurred()) err = cm.GetClient().Get(ctx, types.NamespacedName{Name: "foo"}, &corev1.Namespace{}) @@ -171,6 +172,102 @@ var _ = Describe("controller", func() { List(context.Background(), &controllertest.UnconventionalListTypeList{}) Expect(err).NotTo(HaveOccurred()) }) + + It("should not reconcile after watch is stopped", func() { + By("Creating the Manager") + cm, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the Controller") + instance, err := controller.New("foo-controller", cm, controller.Options{ + Reconciler: reconcile.Func( + func(_ context.Context, request reconcile.Request) (reconcile.Result, error) { + reconciled <- request + return reconcile.Result{}, nil + }), + }) + Expect(err).NotTo(HaveOccurred()) + + By("Watching Resources") + deploySource := source.Kind(cm.GetCache(), &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}) + err = instance.Watch( + deploySource, + ) + Expect(err).NotTo(HaveOccurred()) + + By("Starting the Manager") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + 
defer GinkgoRecover() + Expect(cm.Start(ctx)).NotTo(HaveOccurred()) + }() + + deploymentDefinition := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-name"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + SecurityContext: &corev1.SecurityContext{ + Privileged: truePtr(), + }, + }, + }, + }, + }, + }, + } + deployment := deploymentDefinition.DeepCopy() + expectedReconcileRequest := reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "deployment-name", + }} + + By("Invoking Reconciling for Create") + deployment, err = clientset.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Stopping the deployment watch") + Expect(instance.StopWatch(deploySource)).NotTo(HaveOccurred()) + + By("Test No Reconciling for Update") + newDeployment := deployment.DeepCopy() + newDeployment.Labels = map[string]string{"foo": "bar"} + _, err = clientset.AppsV1().Deployments("default").Update(ctx, newDeployment, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(reconciled).ShouldNot(Receive()) + + By("Test No Reconciling for Delete") + err = clientset.AppsV1().Deployments("default"). 
+ Delete(ctx, "deployment-name", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(reconciled).ShouldNot(Receive()) + + By("Try starting the old deployment watch") + Expect(instance.Watch(deploySource)).To(MatchError("cannot start an already started Kind source")) + + By("Starting a new deployment watch") + deploySource = source.Kind(cm.GetCache(), &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Watch(deploySource)).NotTo(HaveOccurred()) + + deployment = deploymentDefinition.DeepCopy() + + By("Invoking Reconciling for Update") + newDeployment = deployment.DeepCopy() + newDeployment.Labels = map[string]string{"foo": "bar"} + _, err = clientset.AppsV1().Deployments("default").Create(ctx, newDeployment, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + }) }) }) diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index e49a2c5774..5ad4a3de8b 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -77,7 +77,7 @@ var _ = Describe("controller.Controller", func() { ctx, cancel := context.WithCancel(context.Background()) watchChan := make(chan event.GenericEvent, 1) - watch := &source.Channel{Source: watchChan} + watch := source.Channel(source.NewChannelBroadcaster(watchChan), &handler.EnqueueRequestForObject{}) watchChan <- event.GenericEvent{Object: &corev1.Pod{}} reconcileStarted := make(chan struct{}) @@ -99,7 +99,7 @@ var _ = Describe("controller.Controller", func() { Expect(err).NotTo(HaveOccurred()) c, err := controller.New("new-controller", m, controller.Options{Reconciler: rec}) - Expect(c.Watch(watch, &handler.EnqueueRequestForObject{})).To(Succeed()) + Expect(c.Watch(watch)).To(Succeed()) Expect(err).NotTo(HaveOccurred()) go func() { diff --git a/pkg/controller/controllertest/util.go b/pkg/controller/controllertest/util.go index 60ec61edec..2468651af4 100644 --- 
a/pkg/controller/controllertest/util.go +++ b/pkg/controller/controllertest/util.go @@ -17,6 +17,8 @@ limitations under the License. package controllertest import ( + "fmt" + "sync" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,7 +35,8 @@ type FakeInformer struct { // RunCount is incremented each time RunInformersAndControllers is called RunCount int - handlers []eventHandlerWrapper + mu sync.RWMutex + handlers []*eventHandlerWrapper } type modernResourceEventHandler interface { @@ -51,7 +54,8 @@ type legacyResourceEventHandler interface { // eventHandlerWrapper wraps a ResourceEventHandler in a manner that is compatible with client-go 1.27+ and older. // The interface was changed in these versions. type eventHandlerWrapper struct { - handler any + handler any + hasSynced bool } func (e eventHandlerWrapper) OnAdd(obj interface{}) { @@ -78,6 +82,10 @@ func (e eventHandlerWrapper) OnDelete(obj interface{}) { e.handler.(legacyResourceEventHandler).OnDelete(obj) } +func (e eventHandlerWrapper) HasSynced() bool { + return e.hasSynced +} + // AddIndexers does nothing. TODO(community): Implement this. func (f *FakeInformer) AddIndexers(indexers cache.Indexers) error { return nil @@ -98,10 +106,13 @@ func (f *FakeInformer) HasSynced() bool { return f.Synced } -// AddEventHandler implements the Informer interface. Adds an EventHandler to the fake Informers. TODO(community): Implement Registration. +// AddEventHandler implements the Informer interface. Adds an EventHandler to the fake Informers. func (f *FakeInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { - f.handlers = append(f.handlers, eventHandlerWrapper{handler}) - return nil, nil + f.mu.Lock() + defer f.mu.Unlock() + eh := &eventHandlerWrapper{handler, f.Synced} + f.handlers = append(f.handlers, eh) + return eh, nil } // Run implements the Informer interface. Increments f.RunCount. 
@@ -111,6 +122,8 @@ func (f *FakeInformer) Run(<-chan struct{}) { // Add fakes an Add event for obj. func (f *FakeInformer) Add(obj metav1.Object) { + f.mu.RLock() + defer f.mu.RUnlock() for _, h := range f.handlers { h.OnAdd(obj) } @@ -118,6 +131,8 @@ func (f *FakeInformer) Add(obj metav1.Object) { // Update fakes an Update event for obj. func (f *FakeInformer) Update(oldObj, newObj metav1.Object) { + f.mu.RLock() + defer f.mu.RUnlock() for _, h := range f.handlers { h.OnUpdate(oldObj, newObj) } @@ -125,6 +140,8 @@ func (f *FakeInformer) Update(oldObj, newObj metav1.Object) { // Delete fakes an Delete event for obj. func (f *FakeInformer) Delete(obj metav1.Object) { + f.mu.RLock() + defer f.mu.RUnlock() for _, h := range f.handlers { h.OnDelete(obj) } @@ -135,8 +152,25 @@ func (f *FakeInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEve return nil, nil } -// RemoveEventHandler does nothing. TODO(community): Implement this. +// RemoveEventHandler removes an EventHandler to the fake Informers. 
func (f *FakeInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error { + eh, ok := handle.(*eventHandlerWrapper) + if !ok { + return fmt.Errorf("invalid registration type %t", handle) + } + + f.mu.Lock() + defer f.mu.Unlock() + + handlers := make([]*eventHandlerWrapper, 0, len(f.handlers)) + for _, h := range f.handlers { + if h == eh { + continue + } + handlers = append(handlers, h) + } + f.handlers = handlers + return nil } diff --git a/pkg/controller/example_test.go b/pkg/controller/example_test.go index d4fa1aef0b..85e9b9737b 100644 --- a/pkg/controller/example_test.go +++ b/pkg/controller/example_test.go @@ -71,7 +71,7 @@ func ExampleController() { } // Watch for Pod create / update / delete events and call Reconcile - err = c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), &handler.EnqueueRequestForObject{}) + err = c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.EnqueueRequestForObject{})) if err != nil { log.Error(err, "unable to watch pods") os.Exit(1) @@ -108,7 +108,7 @@ func ExampleController_unstructured() { Version: "v1", }) // Watch for Pod create / update / delete events and call Reconcile - err = c.Watch(source.Kind(mgr.GetCache(), u), &handler.EnqueueRequestForObject{}) + err = c.Watch(source.Kind(mgr.GetCache(), u, &handler.EnqueueRequestForObject{})) if err != nil { log.Error(err, "unable to watch pods") os.Exit(1) @@ -139,7 +139,7 @@ func ExampleNewUnmanaged() { os.Exit(1) } - if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), &handler.EnqueueRequestForObject{}); err != nil { + if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.EnqueueRequestForObject{})); err != nil { log.Error(err, "unable to watch pods") os.Exit(1) } diff --git a/pkg/handler/example_test.go b/pkg/handler/example_test.go index 575ea05fca..cd73fb53fc 100644 --- a/pkg/handler/example_test.go +++ b/pkg/handler/example_test.go @@ -42,8 +42,7 @@ var ( func ExampleEnqueueRequestForObject() { // controller is a 
controller.controller err := c.Watch( - source.Kind(mgr.GetCache(), &corev1.Pod{}), - &handler.EnqueueRequestForObject{}, + source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.EnqueueRequestForObject{}), ) if err != nil { // handle it @@ -54,10 +53,11 @@ func ExampleEnqueueRequestForObject() { // owning (direct) Deployment responsible for the creation of the ReplicaSet. func ExampleEnqueueRequestForOwner() { // controller is a controller.controller - err := c.Watch( - source.Kind(mgr.GetCache(), &appsv1.ReplicaSet{}), + err := c.Watch(source.Kind( + mgr.GetCache(), + &appsv1.ReplicaSet{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1.Deployment{}, handler.OnlyControllerOwner()), - ) + )) if err != nil { // handle it } @@ -67,8 +67,9 @@ func ExampleEnqueueRequestForOwner() { // objects (of Type: MyKind) using a mapping function defined by the user. func ExampleEnqueueRequestsFromMapFunc() { // controller is a controller.controller - err := c.Watch( - source.Kind(mgr.GetCache(), &appsv1.Deployment{}), + err := c.Watch(source.Kind( + mgr.GetCache(), + &appsv1.Deployment{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { return []reconcile.Request{ {NamespacedName: types.NamespacedName{ @@ -81,7 +82,7 @@ func ExampleEnqueueRequestsFromMapFunc() { }}, } }), - ) + )) if err != nil { // handle it } @@ -90,8 +91,9 @@ func ExampleEnqueueRequestsFromMapFunc() { // This example implements handler.EnqueueRequestForObject. 
func ExampleFuncs() { // controller is a controller.controller - err := c.Watch( - source.Kind(mgr.GetCache(), &corev1.Pod{}), + err := c.Watch(source.Kind( + mgr.GetCache(), + &corev1.Pod{}, handler.Funcs{ CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -118,7 +120,7 @@ func ExampleFuncs() { }}) }, }, - ) + )) if err != nil { // handle it } diff --git a/pkg/handler/predicates.go b/pkg/handler/predicates.go new file mode 100644 index 0000000000..81175e755b --- /dev/null +++ b/pkg/handler/predicates.go @@ -0,0 +1,47 @@ +package handler + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// WithPredicates returns an EventHandler that only calls the underlying handler if all the given predicates return true. +func WithPredicates(handler EventHandler, predicates ...predicate.Predicate) EventHandler { + return &Funcs{ + CreateFunc: func(ctx context.Context, createEvent event.CreateEvent, queue workqueue.RateLimitingInterface) { + for _, predicate := range predicates { + if !predicate.Create(createEvent) { + return + } + } + handler.Create(ctx, createEvent, queue) + }, + UpdateFunc: func(ctx context.Context, updateEvent event.UpdateEvent, queue workqueue.RateLimitingInterface) { + for _, predicate := range predicates { + if !predicate.Update(updateEvent) { + return + } + } + handler.Update(ctx, updateEvent, queue) + }, + DeleteFunc: func(ctx context.Context, deleteEvent event.DeleteEvent, queue workqueue.RateLimitingInterface) { + for _, predicate := range predicates { + if !predicate.Delete(deleteEvent) { + return + } + } + handler.Delete(ctx, deleteEvent, queue) + }, + GenericFunc: func(ctx context.Context, genericEvent event.GenericEvent, queue workqueue.RateLimitingInterface) { + for _, predicate := range predicates { + if 
!predicate.Generic(genericEvent) { + return + } + } + handler.Generic(ctx, genericEvent, queue) + }, + } +} diff --git a/pkg/internal/controller/controller.go b/pkg/internal/controller/controller.go index 33883647b9..410c09456e 100644 --- a/pkg/internal/controller/controller.go +++ b/pkg/internal/controller/controller.go @@ -28,11 +28,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/util/workqueue" - - "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -59,12 +56,18 @@ type Controller struct { // the Queue for processing Queue workqueue.RateLimitingInterface + // startedSources maintains a list of sources that have already started. + startedSources []cancelableSource + // mu is used to synchronize Controller setup mu sync.Mutex // Started is true if the Controller has been Started Started bool + // Stopped is true if the Controller has been Stopped + Stopped bool + // ctx is the context that was passed to Start() and used when starting watches. // // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, @@ -94,9 +97,7 @@ type Controller struct { // watchDescription contains all the information necessary to start a watch. type watchDescription struct { - src source.Source - handler handler.EventHandler - predicates []predicate.Predicate + src source.Source } // Reconcile implements reconcile.Reconciler. @@ -120,20 +121,71 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ re } // Watch implements controller.Controller. 
-func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { +func (c *Controller) Watch(src source.Source) error { c.mu.Lock() defer c.mu.Unlock() + if c.Stopped { + return fmt.Errorf("can not start watch in a stopped controller") + } + // Controller hasn't started yet, store the watches locally and return. // // These watches are going to be held on the controller struct until the manager or user calls Start(...). if !c.Started { - c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct}) + c.startWatches = append(c.startWatches, watchDescription{src: src}) return nil } c.LogConstructor(nil).Info("Starting EventSource", "source", src) - return src.Start(c.ctx, evthdler, c.Queue, prct...) + return c.startWatch(src) +} + +type cancelableSource struct { + source.Source + cancel context.CancelFunc +} + +func (c *Controller) startWatch(src source.Source) error { + watchCtx, watchCancel := context.WithCancel(c.ctx) + if err := src.Start(watchCtx, c.Queue); err != nil { + watchCancel() + return err + } + + c.startedSources = append(c.startedSources, cancelableSource{ + Source: src, + cancel: watchCancel, + }) + + return nil +} + +// StopWatch implements controller.Controller. +func (c *Controller) StopWatch(src source.Source) error { + err := func() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.Stopped { + return fmt.Errorf("can not stop watch in a stopped controller") + } + + for i, startedSrc := range c.startedSources { + if startedSrc.Source == src { + c.startedSources = append(c.startedSources[:i], c.startedSources[i+1:]...) + startedSrc.cancel() + return startedSrc.Source.Shutdown() + } + } + + return nil + }() + if err != nil { + return err + } + + return src.Shutdown() } // NeedLeaderElection implements the manager.LeaderElectionRunnable interface. 
@@ -149,6 +201,9 @@ func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling // but lock outside to get proper handling of the queue shutdown c.mu.Lock() + if c.Stopped { + return fmt.Errorf("can not restart a stopped controller, you should create a new one") + } if c.Started { return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") } @@ -156,15 +211,33 @@ func (c *Controller) Start(ctx context.Context) error { c.initMetrics() // Set the internal context. - c.ctx = ctx + var cancelAllSources context.CancelFunc + c.ctx, cancelAllSources = context.WithCancel(ctx) + + wg := &sync.WaitGroup{} c.Queue = c.MakeQueue() - go func() { - <-ctx.Done() - c.Queue.ShutDown() + defer func() { + var startedSources []cancelableSource + c.mu.Lock() + c.Stopped = true + startedSources = c.startedSources + c.mu.Unlock() + + c.Queue.ShutDown() // Stop receiving new items in the queue + + cancelAllSources() // cancel the context to stop all the sources + for _, src := range startedSources { + if err := src.Shutdown(); err != nil { + c.LogConstructor(nil).Error(err, "Failed to stop watch source when controller stopping", "source", src) + } + } + c.LogConstructor(nil).Info("All watch sources finished") + + wg.Wait() // Wait for workers to finish + c.LogConstructor(nil).Info("All workers finished") }() - wg := &sync.WaitGroup{} err := func() error { defer c.mu.Unlock() @@ -177,7 +250,7 @@ func (c *Controller) Start(ctx context.Context) error { for _, watch := range c.startWatches { c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src)) - if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { + if err := c.startWatch(watch.src); err != nil { return err } } @@ -238,8 +311,6 @@ func (c *Controller) Start(ctx context.Context) error { <-ctx.Done() c.LogConstructor(nil).Info("Shutdown signal received, waiting 
for all workers to finish") - wg.Wait() - c.LogConstructor(nil).Info("All workers finished") return nil } diff --git a/pkg/internal/controller/controller_test.go b/pkg/internal/controller/controller_test.go index ce2245e60f..5b05f9ea3d 100644 --- a/pkg/internal/controller/controller_test.go +++ b/pkg/internal/controller/controller_test.go @@ -42,7 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -127,7 +126,7 @@ var _ = Describe("controller", func() { It("should return an error if there is an error waiting for the informers", func() { f := false ctrl.startWatches = []watchDescription{{ - src: source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}), + src: source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.EnqueueRequestForObject{}), }} ctrl.Name = "foo" ctx, cancel := context.WithCancel(context.Background()) @@ -145,13 +144,13 @@ var _ = Describe("controller", func() { c = &cacheWithIndefinitelyBlockingGetInformer{c} ctrl.startWatches = []watchDescription{{ - src: source.Kind(c, &appsv1.Deployment{}), + src: source.Kind(c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), }} ctrl.Name = "testcontroller" err = ctrl.Start(context.TODO()) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("failed to wait for testcontroller caches to sync: timed out waiting for cache to be synced")) + Expect(err.Error()).To(ContainSubstring("timed out trying to get an informer from cache for Kind *v1.Deployment")) }) It("should not error when context cancelled", func() { @@ -163,7 +162,7 @@ var _ = Describe("controller", func() { c = &cacheWithIndefinitelyBlockingGetInformer{c} ctrl.startWatches = []watchDescription{{ src: 
&singnallingSourceWrapper{ - SyncingSource: source.Kind(c, &appsv1.Deployment{}), + SyncingSource: source.Kind(c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), cacheSyncDone: sourceSynced, }, }} @@ -191,7 +190,7 @@ var _ = Describe("controller", func() { Expect(err).NotTo(HaveOccurred()) ctrl.startWatches = []watchDescription{{ src: &singnallingSourceWrapper{ - SyncingSource: source.Kind(c, &appsv1.Deployment{}), + SyncingSource: source.Kind(c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), cacheSyncDone: sourceSynced, }, }} @@ -226,20 +225,21 @@ var _ = Describe("controller", func() { Object: p, } - ins := &source.Channel{Source: ch} - ins.DestBufferSize = 1 + ins := source.Channel( + source.NewChannelBroadcaster(ch), + handler.Funcs{ + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + close(processed) + }, + }, + ) // send the event to the channel ch <- evt ctrl.startWatches = []watchDescription{{ src: ins, - handler: handler.Funcs{ - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { - defer GinkgoRecover() - close(processed) - }, - }, }} go func() { @@ -249,35 +249,50 @@ var _ = Describe("controller", func() { <-processed }) - It("should error when channel source is not specified", func() { + It("should error when ChannelBroadcaster is not specified", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ins := source.Channel( + nil, + &handler.EnqueueRequestForObject{}, + ) + ctrl.startWatches = []watchDescription{{ + src: ins, + }} + + e := ctrl.Start(ctx) + Expect(e).To(HaveOccurred()) + Expect(e.Error()).To(ContainSubstring("must create Channel with a non-nil Broadcaster")) + }) + + It("should error when channel eventhandler is not specified", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ins := &source.Channel{} + ins := source.Channel( + 
source.NewChannelBroadcaster(make(<-chan event.GenericEvent)), + nil, + ) ctrl.startWatches = []watchDescription{{ src: ins, }} e := ctrl.Start(ctx) Expect(e).To(HaveOccurred()) - Expect(e.Error()).To(ContainSubstring("must specify Channel.Source")) + Expect(e.Error()).To(ContainSubstring("must create Channel with a non-nil EventHandler")) }) It("should call Start on sources with the appropriate EventHandler, Queue, and Predicates", func() { - pr1 := &predicate.Funcs{} - pr2 := &predicate.Funcs{} - evthdl := &handler.EnqueueRequestForObject{} started := false - src := source.Func(func(ctx context.Context, e handler.EventHandler, q workqueue.RateLimitingInterface, p ...predicate.Predicate) error { + src := source.Func(func(ctx context.Context, q workqueue.RateLimitingInterface) error { defer GinkgoRecover() - Expect(e).To(Equal(evthdl)) Expect(q).To(Equal(ctrl.Queue)) - Expect(p).To(ConsistOf(pr1, pr2)) started = true return nil }) - Expect(ctrl.Watch(src, evthdl, pr1, pr2)).NotTo(HaveOccurred()) + Expect(ctrl.Watch(src)).NotTo(HaveOccurred()) // Use a cancelled context so Start doesn't block ctx, cancel := context.WithCancel(context.Background()) @@ -288,13 +303,11 @@ var _ = Describe("controller", func() { It("should return an error if there is an error starting sources", func() { err := fmt.Errorf("Expected Error: could not start source") - src := source.Func(func(context.Context, handler.EventHandler, - workqueue.RateLimitingInterface, - ...predicate.Predicate) error { + src := source.Func(func(context.Context, workqueue.RateLimitingInterface) error { defer GinkgoRecover() return err }) - Expect(ctrl.Watch(src, &handler.EnqueueRequestForObject{})).To(Succeed()) + Expect(ctrl.Watch(src)).To(Succeed()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -308,7 +321,7 @@ var _ = Describe("controller", func() { Expect(ctrl.Start(ctx)).To(Succeed()) err := ctrl.Start(ctx) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("controller 
was started more than once. This is likely to be caused by being added to a manager multiple times")) + Expect(err.Error()).To(Equal("can not restart a stopped controller, you should create a new one")) }) }) diff --git a/pkg/internal/recorder/recorder_integration_test.go b/pkg/internal/recorder/recorder_integration_test.go index 130a306053..f9a20737fc 100644 --- a/pkg/internal/recorder/recorder_integration_test.go +++ b/pkg/internal/recorder/recorder_integration_test.go @@ -56,7 +56,7 @@ var _ = Describe("recorder", func() { Expect(err).NotTo(HaveOccurred()) By("Watching Resources") - err = instance.Watch(source.Kind(cm.GetCache(), &appsv1.Deployment{}), &handler.EnqueueRequestForObject{}) + err = instance.Watch(source.Kind(cm.GetCache(), &appsv1.Deployment{}, &handler.EnqueueRequestForObject{})) Expect(err).NotTo(HaveOccurred()) By("Starting the Manager") diff --git a/pkg/internal/source/kind.go b/pkg/internal/source/kind.go deleted file mode 100644 index b3a8227125..0000000000 --- a/pkg/internal/source/kind.go +++ /dev/null @@ -1,117 +0,0 @@ -package internal - -import ( - "context" - "errors" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type client.Object - - // Cache used to watch APIs - Cache cache.Cache - - // started may contain an error if one was encountered during startup. If its closed and does not - // contain an error, startup and syncing finished. 
- started chan error - startCancel func() -} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. -func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - if ks.Type == nil { - return fmt.Errorf("must create Kind with a non-nil object") - } - if ks.Cache == nil { - return fmt.Errorf("must create Kind with a non-nil cache") - } - - // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not - // sync that informer (most commonly due to RBAC issues). - ctx, ks.startCancel = context.WithCancel(ctx) - ks.started = make(chan error) - go func() { - var ( - i cache.Informer - lastErr error - ) - - // Tries to get an informer until it returns true, - // an error or the specified context is cancelled or expired. - if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) - if lastErr != nil { - kindMatchErr := &meta.NoKindMatchError{} - switch { - case errors.As(lastErr, &kindMatchErr): - log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - case runtime.IsNotRegisteredError(lastErr): - log.Error(lastErr, "kind must be registered to the Scheme") - default: - log.Error(lastErr, "failed to get informer from cache") - } - return false, nil // Retry. 
- } - return true, nil - }); err != nil { - if lastErr != nil { - ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) - return - } - ks.started <- err - return - } - - _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) - if err != nil { - ks.started <- err - return - } - if !ks.Cache.WaitForCacheSync(ctx) { - // Would be great to return something more informative here - ks.started <- errors.New("cache did not sync") - } - close(ks.started) - }() - - return nil -} - -func (ks *Kind) String() string { - if ks.Type != nil { - return fmt.Sprintf("kind source: %T", ks.Type) - } - return "kind source: unknown type" -} - -// WaitForSync implements SyncingSource to allow controllers to wait with starting -// workers until the cache is synced. -func (ks *Kind) WaitForSync(ctx context.Context) error { - select { - case err := <-ks.started: - return err - case <-ctx.Done(): - ks.startCancel() - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) - } -} diff --git a/pkg/predicate/predicate.go b/pkg/predicate/predicate.go index 314635875e..b50c0c529e 100644 --- a/pkg/predicate/predicate.go +++ b/pkg/predicate/predicate.go @@ -177,9 +177,13 @@ func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { // It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: // // Controller.Watch( -// &source.Kind{Type: v1.MyCustomKind}, -// &handler.EnqueueRequestForObject{}, -// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// source.Kind( +// mgr.GetCache(), +// v1.MyCustomKind, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// ), +// ) // // This is mostly useful for controllers that needs to trigger both when the resource's generation is 
incremented // (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API). @@ -206,11 +210,14 @@ func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { // This predicate will skip update events that have no change in the object's label. // It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: // -// Controller.Watch( -// -// &source.Kind{Type: v1.MyCustomKind}, -// &handler.EnqueueRequestForObject{}, -// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})) +// Controller.Watch( +// source.Kind( +// mgr.GetCache(), +// v1.MyCustomKind, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})) +// ), +// ) // // This will be helpful when object's labels is carrying some extra specification information beyond object's spec, // and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens. diff --git a/pkg/source/example_test.go b/pkg/source/example_test.go index 77857729de..4a668f7158 100644 --- a/pkg/source/example_test.go +++ b/pkg/source/example_test.go @@ -31,7 +31,7 @@ var ctrl controller.Controller // This example Watches for Pod Events (e.g. Create / Update / Delete) and enqueues a reconcile.Request // with the Name and Namespace of the Pod. 
func ExampleKind() { - err := ctrl.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), &handler.EnqueueRequestForObject{}) + err := ctrl.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.EnqueueRequestForObject{})) if err != nil { // handle it } @@ -43,8 +43,10 @@ func ExampleChannel() { events := make(chan event.GenericEvent) err := ctrl.Watch( - &source.Channel{Source: events}, - &handler.EnqueueRequestForObject{}, + source.Channel( + source.NewChannelBroadcaster(events), + &handler.EnqueueRequestForObject{}, + ), ) if err != nil { // handle it diff --git a/pkg/source/internal/channel.go b/pkg/source/internal/channel.go new file mode 100644 index 0000000000..418fd7c4e7 --- /dev/null +++ b/pkg/source/internal/channel.go @@ -0,0 +1,159 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "sync" + + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" +) + +// ChannelOptions contains the options for the Channel source. +type ChannelOptions struct { + // DestBufferSize is the specified buffer size of dest channels. + // Default to 1024 if not specified. + DestBufferSize int +} + +// Channel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (e.g. 
http handler) to write GenericEvents to the underlying channel. +type Channel struct { + Options ChannelOptions + + // Broadcaster contains the source channel for events. + Broadcaster *ChannelBroadcaster + + // EventHandler is the handler to call when events are received. + EventHandler handler.EventHandler + + mu sync.Mutex + // isStarted is true if Start has been called. + // - A source can only be started once. + // - WaitForSync can only be called after the source is started. + // - Shutdown will exit early if the source was never started. + isStarted bool + // shuttingDown is true if Shutdown has been called. + // - If true, Start will exit early. + shuttingDown bool + + // doneCh is closed when the source stopped listening to the broadcaster. + doneCh chan struct{} +} + +func (cs *Channel) String() string { + return fmt.Sprintf("channel source: %p", cs) +} + +// Start is internal and should be called only by the Controller to start a source which enqueue reconcile.Requests. +// It should NOT block, instead, it should start a goroutine and return immediately. +// The context passed to Start can be used to cancel this goroutine. After the context is canceled, Shutdown can be +// called to wait for the goroutine to exit. +// Start can be called only once, it is thus not possible to share a single source between multiple controllers. 
+func (cs *Channel) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { + if cs.Broadcaster == nil { + return fmt.Errorf("must create Channel with a non-nil Broadcaster") + } + if cs.EventHandler == nil { + return fmt.Errorf("must create Channel with a non-nil EventHandler") + } + + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.isStarted { + return fmt.Errorf("cannot start an already started Channel source") + } + if cs.shuttingDown { + return nil + } + cs.isStarted = true + + // Create a destination channel for the event handler + // and add it to the list of destinations + destination := make(chan event.GenericEvent, cs.Options.DestBufferSize) + cs.Broadcaster.AddListener(destination) + + cs.doneCh = make(chan struct{}) + go func() { + defer close(cs.doneCh) + // Remove the listener and wait for the broadcaster + // to stop sending events to the destination channel. + defer cs.Broadcaster.RemoveListener(destination) + + cs.processReceivedEvents( + ctx, + destination, + queue, + ) + }() + + return nil +} + +func (cs *Channel) processReceivedEvents( + ctx context.Context, + destination <-chan event.GenericEvent, + queue workqueue.RateLimitingInterface, +) { + for { + select { + case <-ctx.Done(): + return + case event, stillOpen := <-destination: + if !stillOpen { + return + } + + // Call the event handler with the event. + cs.EventHandler.Generic(ctx, event, queue) + } + } +} + +// Shutdown marks a Source as shutting down. At that point no new +// informers can be started anymore and Start will return without +// doing anything. +// +// In addition, Shutdown blocks until all goroutines have terminated. For that +// to happen, the close channel(s) that they were started with must be closed, +// either before Shutdown gets called or while it is waiting. +// +// Shutdown may be called multiple times, even concurrently. All such calls will +// block until all goroutines have terminated. 
+func (cs *Channel) Shutdown() error { + if func() bool { + cs.mu.Lock() + defer cs.mu.Unlock() + + // Ensure that when we release the lock, we stop an in-process & future calls to Start(). + cs.shuttingDown = true + + // If we haven't started yet, there's nothing to stop. + return !cs.isStarted + }() { + return nil + } + + // Wait for the listener goroutine to exit. + <-cs.doneCh + + return nil +} diff --git a/pkg/source/internal/channel_broadcast.go b/pkg/source/internal/channel_broadcast.go new file mode 100644 index 0000000000..cb7c9431c6 --- /dev/null +++ b/pkg/source/internal/channel_broadcast.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// ChannelBroadcaster is a wrapper around a channel that allows multiple listeners to all +// receive the events from the channel. +type ChannelBroadcaster struct { + source <-chan event.GenericEvent + + mu sync.Mutex + rcCount uint + managementCh chan managementMsg + doneCh chan struct{} +} + +type managementOperation bool + +const ( + addChannel managementOperation = true + removeChannel managementOperation = false +) + +type managementMsg struct { + operation managementOperation + ch chan event.GenericEvent +} + +// NewChannelBroadcaster creates a new ChannelBroadcaster for the given channel. 
+func NewChannelBroadcaster(source <-chan event.GenericEvent) *ChannelBroadcaster { + return &ChannelBroadcaster{ + source: source, + } +} + +// AddListener adds a new listener to the ChannelBroadcaster. Each listener +// will receive all events from the source channel. All listeners have to be +// removed using RemoveListener before the ChannelBroadcaster can be garbage +// collected. +func (sc *ChannelBroadcaster) AddListener(ch chan event.GenericEvent) { + var managementCh chan managementMsg + var doneCh chan struct{} + isFirst := false + func() { + sc.mu.Lock() + defer sc.mu.Unlock() + + isFirst = sc.rcCount == 0 + sc.rcCount++ + + if isFirst { + sc.managementCh = make(chan managementMsg) + sc.doneCh = make(chan struct{}) + } + + managementCh = sc.managementCh + doneCh = sc.doneCh + }() + + if isFirst { + go startLoop(sc.source, managementCh, doneCh) + } + + // If the goroutine is not yet stopped, send a message to add the + // destination channel. The routine might be stopped already because + // the source channel was closed. + select { + case <-doneCh: + default: + managementCh <- managementMsg{ + operation: addChannel, + ch: ch, + } + } +} + +func startLoop( + source <-chan event.GenericEvent, + managementCh chan managementMsg, + doneCh chan struct{}, +) { + defer close(doneCh) + + var destinations []chan event.GenericEvent + + // Close all remaining destinations in case the Source channel is closed. + defer func() { + for _, dst := range destinations { + close(dst) + } + }() + + // Wait for the first destination to be added before starting the loop. 
+ for len(destinations) == 0 { + managementMsg := <-managementCh + if managementMsg.operation == addChannel { + destinations = append(destinations, managementMsg.ch) + } + } + + for { + select { + case msg := <-managementCh: + + switch msg.operation { + case addChannel: + destinations = append(destinations, msg.ch) + case removeChannel: + SearchLoop: + for i, dst := range destinations { + if dst == msg.ch { + destinations = append(destinations[:i], destinations[i+1:]...) + close(dst) + break SearchLoop + } + } + + if len(destinations) == 0 { + return + } + } + + case evt, stillOpen := <-source: + if !stillOpen { + return + } + + for _, dst := range destinations { + // We cannot make it under goroutine here, or we'll meet the + // race condition of writing message to closed channels. + // To avoid blocking, the dest channels are expected to be of + // proper buffer size. If we still see it blocked, then + // the controller is thought to be in an abnormal state. + dst <- evt + } + } + } +} + +// RemoveListener removes a listener from the ChannelBroadcaster. The listener +// will no longer receive events from the source channel. If this is the last +// listener, this function will block until the ChannelBroadcaster's is stopped. +func (sc *ChannelBroadcaster) RemoveListener(ch chan event.GenericEvent) { + var managementCh chan managementMsg + var doneCh chan struct{} + isLast := false + func() { + sc.mu.Lock() + defer sc.mu.Unlock() + + sc.rcCount-- + isLast = sc.rcCount == 0 + + managementCh = sc.managementCh + doneCh = sc.doneCh + }() + + // If the goroutine is not yet stopped, send a message to remove the + // destination channel. The routine might be stopped already because + // the source channel was closed. 
+ select { + case <-doneCh: + default: + managementCh <- managementMsg{ + operation: removeChannel, + ch: ch, + } + } + + // Wait for the doneCh to be closed (in case we are the last one) + if isLast { + <-doneCh + } + + // Wait for the destination channel to be closed. + <-ch +} diff --git a/pkg/internal/source/event_handler.go b/pkg/source/internal/event_handler.go similarity index 88% rename from pkg/internal/source/event_handler.go rename to pkg/source/internal/event_handler.go index ae8404a1fa..43edadb977 100644 --- a/pkg/internal/source/event_handler.go +++ b/pkg/source/internal/event_handler.go @@ -26,19 +26,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - - "sigs.k8s.io/controller-runtime/pkg/predicate" ) var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") // NewEventHandler creates a new EventHandler. -func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler { +func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler) *EventHandler { return &EventHandler{ - ctx: ctx, - handler: handler, - queue: queue, - predicates: predicates, + ctx: ctx, + handler: handler, + queue: queue, } } @@ -48,9 +45,8 @@ type EventHandler struct { // that is used to propagate cancellation signals to each handler function. 
ctx context.Context - handler handler.EventHandler - queue workqueue.RateLimitingInterface - predicates []predicate.Predicate + handler handler.EventHandler + queue workqueue.RateLimitingInterface } // HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs @@ -76,12 +72,6 @@ func (e *EventHandler) OnAdd(obj interface{}) { return } - for _, p := range e.predicates { - if !p.Create(c) { - return - } - } - // Invoke create handler ctx, cancel := context.WithCancel(e.ctx) defer cancel() @@ -109,12 +99,6 @@ func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - for _, p := range e.predicates { - if !p.Update(u) { - return - } - } - // Invoke update handler ctx, cancel := context.WithCancel(e.ctx) defer cancel() @@ -157,12 +141,6 @@ func (e *EventHandler) OnDelete(obj interface{}) { return } - for _, p := range e.predicates { - if !p.Delete(d) { - return - } - } - // Invoke delete handler ctx, cancel := context.WithCancel(e.ctx) defer cancel() diff --git a/pkg/source/internal/func.go b/pkg/source/internal/func.go new file mode 100644 index 0000000000..cd8e7641b3 --- /dev/null +++ b/pkg/source/internal/func.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "sync" + + "k8s.io/client-go/util/workqueue" +) + +// StartFunc is a function that starts a goroutine and returns immediately. 
+type StartFunc func(context.Context, workqueue.RateLimitingInterface) error + +// FuncOptions contains the options for the Func source. +type FuncOptions struct { + // ShutdownFunc is an optional function that is called when the source + // is shutting down. It should block until all goroutines have finished. + ShutdownFunc func() error +} + +// FuncSource is a source which is implemented by the provided Func. +type FuncSource struct { + StartFunc StartFunc + Options FuncOptions + + mu sync.RWMutex + // shuttingDown is true if the source has been shuttingDown and causes any following/ in-progress + // Start calls to no-op. + shuttingDown bool + // isStarted is true if the source has been started. A source can only be started once. + isStarted bool +} + +// Start is internal and should be called only by the Controller to start a source which +// enqueues reconcile.Requests. +// It should NOT block, instead, it should start a goroutine and return immediately. The +// context passed to Start can be used to cancel this goroutine. Shutdown can be called +// to wait for the goroutine to exit. +// Start can be called only once, it is thus not possible to share a single source between +// multiple controllers. +func (f *FuncSource) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { + if f.StartFunc == nil { + return fmt.Errorf("must create FuncSource with a non-nil StartFunc") + } + + f.mu.Lock() + defer f.mu.Unlock() + if f.isStarted { + return fmt.Errorf("cannot start an already started source") + } + if f.shuttingDown { + return nil + } + f.isStarted = true + + return f.StartFunc(ctx, queue) +} + +func (f *FuncSource) String() string { + return "Func" +} + +// Shutdown marks a Source as shutting down. At that point the source cannot +// be started anymore, Start will no-op and return immediately. +// +// In addition, Shutdown blocks until all goroutines have finished. This must +// happen either before Shutdown gets called or while it is waiting. 
To trigger +// the termination of the goroutines, the context passed to Start should be +// canceled. +// +// Shutdown may be called multiple times, even concurrently. All such calls will +// block until all goroutines have terminated. +func (f *FuncSource) Shutdown() error { + if func() bool { + f.mu.RLock() + defer f.mu.RUnlock() + + // Ensure that when we release the lock, we stop an in-process & future calls to Start(). + f.shuttingDown = true + + return !f.isStarted + }() { + return nil + } + + if f.Options.ShutdownFunc == nil { + return nil + } + + return f.Options.ShutdownFunc() +} diff --git a/pkg/source/internal/informer.go b/pkg/source/internal/informer.go new file mode 100644 index 0000000000..61a3495c18 --- /dev/null +++ b/pkg/source/internal/informer.go @@ -0,0 +1,179 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "errors" + "fmt" + "sync" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + toolscache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/handler" +) + +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Informer struct { + // Informer is the controller-runtime Informer + Informer cache.Informer + + // EventHandler is the handler to call when events are received. 
+ EventHandler handler.EventHandler + + mu sync.RWMutex + // isStarted is true if Start has been called. + // - A source can only be started once. + // - WaitForSync can only be called after the source is started. + // - Shutdown will exit early if the source was never started. + isStarted bool + // shuttingDown is true if Shutdown has been called. + // - If true, Start will exit early. + shuttingDown bool + + // startupErr may contain an error if one was encountered during startup. + startupErr startupErr + + // registration is the registered EventHandler handle. + registration toolscache.ResourceEventHandlerRegistration +} + +// Start is internal and should be called only by the Controller to start a source which enqueue reconcile.Requests. +// It should NOT block, instead, it should start a goroutine and return immediately. +// The context passed to Start can be used to cancel this goroutine. After the context is canceled, Shutdown can be +// called to wait for the goroutine to exit. +// Start can be called only once, it is thus not possible to share a single source between multiple controllers. 
+func (is *Informer) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { + if is.Informer == nil { + return fmt.Errorf("must create Informer with a non-nil Informer") + } + if is.EventHandler == nil { + return fmt.Errorf("must create Informer with a non-nil EventHandler") + } + + is.mu.Lock() + defer is.mu.Unlock() + if is.isStarted { + return fmt.Errorf("cannot start an already started Informer source") + } + if is.shuttingDown { + return nil + } + is.isStarted = true + + registration, err := is.Informer.AddEventHandler(NewEventHandler(ctx, queue, is.EventHandler).HandlerFuncs()) + if err != nil { + is.startupErr = startupErr{ + isCanceled: errors.Is(ctx.Err(), context.Canceled), + err: fmt.Errorf("failed to add EventHandler to informer: %w", err), + } + } + is.registration = registration + + return nil +} + +func (is *Informer) String() string { + return fmt.Sprintf("informer source: %p", is.Informer) +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +// +// WaitForSync blocks until the cache is synced or the passed context is canceled. +// If the passed context is canceled, with a non-context.Canceled error, WaitForSync +// will return an error. Also, when the cache is stopped before it is synced, this +// function will remain blocked until the passed context is canceled. +func (is *Informer) WaitForSync(ctx context.Context) error { + if func() bool { + is.mu.RLock() + defer is.mu.RUnlock() + + return !is.isStarted + }() { + return fmt.Errorf("cannot wait for sync on an unstarted source") + } + + // If the startup function was successful, we will wait for the cache to sync. 
+	if is.startupErr.err == nil {
+		if err := wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(_ context.Context) (bool, error) {
+			return is.registration.HasSynced(), nil
+		}); err != nil {
+			return fmt.Errorf("informer did not sync in time: %w", err)
+		}
+
+		// Happy path: the cache has synced.
+		return nil
+	}
+
+	// We got a startup error, so we will block until the context is canceled.
+	<-ctx.Done()
+
+	// Return a timeout error if the context was not cancelled.
+	if !errors.Is(ctx.Err(), context.Canceled) {
+		return fmt.Errorf("timed out waiting for informer that failed to start")
+	}
+
+	// If the context was cancelled, we return nil.
+	// This might happen if the controller is stopped or just the WaitForSync call is cancelled.
+	// In both cases, the Shutdown method should be used to retrieve errors and clean up resources.
+	return nil
+}
+
+// Shutdown marks a Source as shutting down. At that point no new
+// informers can be started anymore and Start will return without
+// doing anything.
+//
+// In addition, Shutdown blocks until all goroutines have terminated. For that
+// to happen, the close channel(s) that they were started with must be closed,
+// either before Shutdown gets called or while it is waiting.
+//
+// Shutdown may be called multiple times, even concurrently. All such calls will
+// block until all goroutines have terminated.
+func (is *Informer) Shutdown() error {
+	if func() bool {
+		// Take the write lock: shuttingDown is written here and concurrent
+		// Shutdown calls are allowed, so a read lock would be a data race.
+		is.mu.Lock()
+		defer is.mu.Unlock()
+
+		// Ensure that when we release the lock, in-progress and future calls to Start() no-op.
+		is.shuttingDown = true
+
+		return !is.isStarted
+	}() {
+		return nil
+	}
+
+	var errs []error
+
+	// Check if we have any startup errors. We ignore the errors if the context was canceled, because it means that the
+	// source was stopped by the user.
+ if is.startupErr.err != nil && !is.startupErr.isCanceled { + errs = append(errs, fmt.Errorf("failed to start source: %w", is.startupErr.err)) + } + + // Remove event handler if it was registered. + if is.Informer != nil && is.registration != nil { + if err := is.Informer.RemoveEventHandler(is.registration); err != nil { + errs = append(errs, fmt.Errorf("failed to stop source: %w", err)) + } + } + + return kerrors.NewAggregate(errs) +} diff --git a/pkg/internal/source/internal_suite_test.go b/pkg/source/internal/internal_suite_test.go similarity index 100% rename from pkg/internal/source/internal_suite_test.go rename to pkg/source/internal/internal_suite_test.go diff --git a/pkg/internal/source/internal_test.go b/pkg/source/internal/internal_test.go similarity index 86% rename from pkg/internal/source/internal_test.go rename to pkg/source/internal/internal_test.go index 0574f7180e..6446fc3d86 100644 --- a/pkg/internal/source/internal_test.go +++ b/pkg/source/internal/internal_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - internal "sigs.k8s.io/controller-runtime/pkg/internal/source" + internal "sigs.k8s.io/controller-runtime/pkg/source/internal" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -74,7 +74,7 @@ var _ = Describe("Internal", func() { set = true }, } - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, funcs, nil) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, funcs) }) Describe("EventHandler", func() { @@ -99,41 +99,47 @@ var _ = Describe("Internal", func() { }) It("should used Predicates to filter CreateEvents", func() { - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, - }) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, + 
handler.WithPredicates(setfuncs, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}), + ) set = false instance.OnAdd(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - }) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, + handler.WithPredicates(setfuncs, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}), + ) instance.OnAdd(pod) Expect(set).To(BeTrue()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, - }) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, + handler.WithPredicates(setfuncs, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, + ), + ) instance.OnAdd(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - }) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, + handler.WithPredicates(setfuncs, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + ), + ) instance.OnAdd(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ - predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - predicate.Funcs{CreateFunc: 
func(event.CreateEvent) bool { return true }}, - }) + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, + handler.WithPredicates(setfuncs, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + ), + ) instance.OnAdd(pod) Expect(set).To(BeTrue()) }) @@ -157,40 +163,40 @@ var _ = Describe("Internal", func() { It("should used Predicates to filter UpdateEvents", func() { set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{UpdateFunc: func(updateEvent event.UpdateEvent) bool { return false }}, - }) + )) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, - }) + )) instance.OnUpdate(pod, newPod) Expect(set).To(BeTrue()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, - }) + )) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return 
true }}, - }) + )) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - }) + )) instance.OnUpdate(pod, newPod) Expect(set).To(BeTrue()) }) @@ -215,40 +221,40 @@ var _ = Describe("Internal", func() { It("should used Predicates to filter DeleteEvents", func() { set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, - }) + )) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - }) + )) instance.OnDelete(pod) Expect(set).To(BeTrue()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, - }) + )) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{DeleteFunc: 
func(event.DeleteEvent) bool { return false }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - }) + )) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, handler.WithPredicates(setfuncs, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - }) + )) instance.OnDelete(pod) Expect(set).To(BeTrue()) }) diff --git a/pkg/source/internal/kind.go b/pkg/source/internal/kind.go new file mode 100644 index 0000000000..88980c56cb --- /dev/null +++ b/pkg/source/internal/kind.go @@ -0,0 +1,296 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + toolscache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" +) + +// syncedPollPeriod controls how often you look at the status of your sync funcs +const syncedPollPeriod = 100 * time.Millisecond + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + // Cache used to watch APIs + Cache cache.Cache + + // EventHandler is the handler to call when events are received. + EventHandler handler.EventHandler + + mu sync.RWMutex + // isStarted is true if Start has been called. + // - A source can only be started once. + // - WaitForSync can only be called after the source is started. + // - Shutdown will exit early if the source was never started. + isStarted bool + // shuttingDown is true if Shutdown has been called. + // - If true, Start will exit early. + shuttingDown bool + + // startupDoneCh will close once the startup goroutine has finished running. If the + // startupErr is nil, the source has finished starting up and is ready to be used. + startupDoneCh chan struct{} + // startupErr contains an error if one was encountered during startup. + startupErr atomic.Value + + // informer is the informer that we obtained from the cache. + informer cache.Informer + // registration is the registered EventHandler handle. 
+ registration toolscache.ResourceEventHandlerRegistration +} + +type startupErr struct { + isCanceled bool + err error +} + +// Start is internal and should be called only by the Controller to start a source which enqueue reconcile.Requests. +// It should NOT block, instead, it should start a goroutine and return immediately. +// The context passed to Start can be used to cancel this goroutine. After the context is canceled, Shutdown can be +// called to wait for the goroutine to exit. +// Start can be called only once, it is thus not possible to share a single source between multiple controllers. +func (ks *Kind) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { + if ks.Type == nil { + return fmt.Errorf("must create Kind with a non-nil Type") + } + if ks.Cache == nil { + return fmt.Errorf("must create Kind with a non-nil Cache") + } + if ks.EventHandler == nil { + return fmt.Errorf("must create Kind with a non-nil EventHandler") + } + + ks.mu.Lock() + defer ks.mu.Unlock() + if ks.isStarted { + return fmt.Errorf("cannot start an already started Kind source") + } + if ks.shuttingDown { + return nil + } + ks.isStarted = true + + ks.startupDoneCh = make(chan struct{}) + go func() { + defer close(ks.startupDoneCh) + + err := ks.registerEventHandler( + ctx, + NewEventHandler(ctx, queue, ks.EventHandler).HandlerFuncs(), + ) + if err != nil { + ks.startupErr.Store(startupErr{ + isCanceled: errors.Is(ctx.Err(), context.Canceled), + err: err, + }) + } + }() + + return nil +} + +func getInformer(ctx context.Context, informerCache cache.Cache, resourceType client.Object) (cache.Informer, error) { + var informer cache.Informer + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(pollCtx context.Context) (bool, error) { + // Lookup the Informer from the Cache. 
+ var err error + informer, err = informerCache.GetInformer(pollCtx, resourceType, cache.BlockUntilSynced(false)) + if err != nil { + kindMatchErr := &meta.NoKindMatchError{} + discoveryErr := &discovery.ErrGroupDiscoveryFailed{} + switch { + case errors.As(err, &kindMatchErr): + // We got a NoKindMatchError, which means the kind is a CRD and it's not installed yet. + // We should retry until it's installed. + log.Error(err, "waiting for CRD to be installed", "groupKind", kindMatchErr.GroupKind) + return false, nil // Retry. + case errors.As(err, &discoveryErr): + // We got a ErrGroupDiscoveryFailed, which means the kind is a CRD and it's not installed yet. + // We should retry until it's installed. + for gv, err := range discoveryErr.Groups { + log.Error(err, "waiting for CRD to be installed", "groupVersion", gv) + } + return false, nil // Retry. + case runtime.IsNotRegisteredError(err): + // We got a IsNotRegisteredError, which means the kind is not registered to the Scheme. + // This is a programming error, so we should stop retrying and return the error. + return true, fmt.Errorf("kind must be registered to the Scheme: %w", err) + default: + // We got an error that is not a NoKindMatchError or IsNotRegisteredError, so we should + // stop retrying and return the error. + return true, fmt.Errorf("failed to get informer from cache: %w", err) + } + } + return true, nil + }); err != nil { + return nil, err + } + + return informer, nil +} + +func (ks *Kind) registerEventHandler(ctx context.Context, eventHandler toolscache.ResourceEventHandlerFuncs) error { + informer, err := getInformer(ctx, ks.Cache, ks.Type) + if err != nil { + return err + } + + // We want to prevent unnecessarily registering the eventHandler after + // the source has been shut down. Then we return early. + if func() bool { + ks.mu.RLock() + defer ks.mu.RUnlock() + + return ks.shuttingDown + }() { + return nil + } + + // Save the informer so that we use it to unregister the event handler in Stop. 
+ ks.informer = informer + + // Register the event handler and save the registration so that we can unregister it in Stop. + registration, err := ks.informer.AddEventHandler(eventHandler) + if err != nil { + return err + } + ks.registration = registration + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +// +// WaitForSync blocks until the cache is synced or the passed context is canceled. +// If the passed context is canceled, with a non-context.Canceled error, WaitForSync +// will return an error. Also, when the cache is stopped before it is synced, this +// function will remain blocked until the passed context is canceled. +func (ks *Kind) WaitForSync(ctx context.Context) error { + if func() bool { + ks.mu.RLock() + defer ks.mu.RUnlock() + + return !ks.isStarted + }() { + return fmt.Errorf("cannot wait for sync on an unstarted source") + } + + select { + case <-ctx.Done(): + // do nothing + case <-ks.startupDoneCh: + // The startup goroutine has finished running. + + // If the startup was successful, we will wait for the cache to be synced. + if startErr, _ := ks.startupErr.Load().(startupErr); startErr.err == nil { + if err := wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(_ context.Context) (bool, error) { + return ks.registration.HasSynced(), nil + }); err != nil { + return fmt.Errorf("informer did not sync in time: %w", err) + } + + // Happy path: the startup goroutine returned without an error and the cache was synced. + return nil + } + } + + // Wait for the context to be cancelled (in case the startup was completed with an error). + <-ctx.Done() + + // Return a timeout error if the context was not cancelled. 
+ if !errors.Is(ctx.Err(), context.Canceled) { + return fmt.Errorf("timed out trying to get an informer from cache for Kind %T", ks.Type) + } + + // If the context was cancelled, we return nil. + // This might happen if the controller is stopped or just the WaitForSync call is cancelled. + // In both cases, the Shutdown method should be used to retrieve errors and clean up resources. + return nil +} + +// Shutdown marks a Source as shutting down. At that point no new +// informers can be started anymore and Start will return without +// doing anything. +// +// In addition, Shutdown blocks until all goroutines have terminated. For that +// to happen, the close channel(s) that they were started with must be closed, +// either before Shutdown gets called or while it is waiting. +// +// Shutdown may be called multiple times, even concurrently. All such calls will +// block until all goroutines have terminated. +func (ks *Kind) Shutdown() error { + if func() bool { + ks.mu.Lock() + defer ks.mu.Unlock() + + // Ensure that when we release the lock, we stop an in-process & future calls to Start(). + ks.shuttingDown = true + + return !ks.isStarted + }() { + return nil + } + + // Wait for the started channel to be closed. + <-ks.startupDoneCh + + var errs []error + + // Check if we have any startup errors. We ignore the errors if the context was canceled, + // because it means that the source was stopped by the user. + if startErr, _ := ks.startupErr.Load().(startupErr); startErr.err != nil && !startErr.isCanceled { + errs = append(errs, fmt.Errorf("failed to start source: %w", startErr.err)) + } + + // Remove event handler if it was registered. 
+ if ks.informer != nil && ks.registration != nil { + if err := ks.informer.RemoveEventHandler(ks.registration); err != nil { + errs = append(errs, fmt.Errorf("failed to stop source: %w", err)) + } + } + + return kerrors.NewAggregate(errs) +} diff --git a/pkg/source/source.go b/pkg/source/source.go index 099c8d68fa..40eed9270d 100644 --- a/pkg/source/source.go +++ b/pkg/source/source.go @@ -18,22 +18,13 @@ package source import ( "context" - "fmt" - "sync" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - internal "sigs.k8s.io/controller-runtime/pkg/internal/source" + internal "sigs.k8s.io/controller-runtime/pkg/source/internal" "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -const ( - // defaultBufferSize is the default number of event notifications that can be buffered. - defaultBufferSize = 1024 ) // Source is a source of events (eh.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) @@ -45,9 +36,23 @@ const ( // // Users may build their own Source implementations. type Source interface { - // Start is internal and should be called only by the Controller to register an EventHandler with the Informer - // to enqueue reconcile.Requests. - Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error + // Start is internal and should be called only by the Controller to start a goroutine that enqueues + // reconcile.Requests. It should NOT block, instead, it should start a goroutine and return immediately. + // The context passed to Start can be used to cancel the blocking operations in the Start method. To cancel the + // goroutine/ shutdown the source a user should call Stop. + Start(context.Context, workqueue.RateLimitingInterface) error + + // Shutdown marks a Source as shutting down. 
At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() error } // SyncingSource is a source that needs syncing prior to being usable. The controller @@ -57,169 +62,79 @@ type SyncingSource interface { WaitForSync(ctx context.Context) error } -// Kind creates a KindSource with the given cache provider. -func Kind(cache cache.Cache, object client.Object) SyncingSource { - return &internal.Kind{Type: object, Cache: cache} -} - -var _ Source = &Channel{} - -// Channel is used to provide a source of events originating outside the cluster -// (e.g. GitHub Webhook callback). Channel requires the user to wire the external -// source (eh.g. http handler) to write GenericEvents to the underlying channel. -type Channel struct { - // once ensures the event distribution goroutine will be performed only once - once sync.Once - - // Source is the source channel to fetch GenericEvents - Source <-chan event.GenericEvent - - // dest is the destination channels of the added event handlers - dest []chan event.GenericEvent +var _ Source = &internal.Kind{} +var _ SyncingSource = &internal.Kind{} +var _ Source = &internal.Informer{} +var _ SyncingSource = &internal.Informer{} +var _ Source = &internal.Channel{} +var _ Source = &internal.FuncSource{} - // DestBufferSize is the specified buffer size of dest channels. - // Default to 1024 if not specified. - DestBufferSize int - - // destLock is to ensure the destination channels are safely added/removed - destLock sync.Mutex +// Kind creates a KindSource with the given cache provider. 
+func Kind(cache cache.Cache, object client.Object, eventhandler handler.EventHandler) SyncingSource { + return &internal.Kind{Cache: cache, Type: object, EventHandler: eventhandler} } -func (cs *Channel) String() string { - return fmt.Sprintf("channel source: %p", cs) -} +// NewChannelBroadcaster creates a new ChannelBroadcaster for the given channel. +// A ChannelBroadcaster is a wrapper around a channel that allows multiple listeners to all +// receive the events from the channel. +var NewChannelBroadcaster = internal.NewChannelBroadcaster -// Start implements Source and should only be called by the Controller. -func (cs *Channel) Start( - ctx context.Context, - handler handler.EventHandler, - queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Source should have been specified by the user. - if cs.Source == nil { - return fmt.Errorf("must specify Channel.Source") - } +// ChannelOption is a functional option for configuring a Channel source. +type ChannelOption func(*internal.ChannelOptions) - // use default value if DestBufferSize not specified - if cs.DestBufferSize == 0 { - cs.DestBufferSize = defaultBufferSize - } - - dst := make(chan event.GenericEvent, cs.DestBufferSize) - - cs.destLock.Lock() - cs.dest = append(cs.dest, dst) - cs.destLock.Unlock() - - cs.once.Do(func() { - // Distribute GenericEvents to all EventHandler / Queue pairs Watching this source - go cs.syncLoop(ctx) - }) - - go func() { - for evt := range dst { - shouldHandle := true - for _, p := range prct { - if !p.Generic(evt) { - shouldHandle = false - break - } - } - - if shouldHandle { - func() { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - handler.Generic(ctx, evt, queue) - }() - } +// WithDestBufferSize specifies the buffer size of dest channels. 
+func WithDestBufferSize(destBufferSize int) ChannelOption { + return func(o *internal.ChannelOptions) { + if destBufferSize <= 0 { + return // ignore invalid buffer size } - }() - return nil -} - -func (cs *Channel) doStop() { - cs.destLock.Lock() - defer cs.destLock.Unlock() - - for _, dst := range cs.dest { - close(dst) + o.DestBufferSize = destBufferSize } } -func (cs *Channel) distribute(evt event.GenericEvent) { - cs.destLock.Lock() - defer cs.destLock.Unlock() - - for _, dst := range cs.dest { - // We cannot make it under goroutine here, or we'll meet the - // race condition of writing message to closed channels. - // To avoid blocking, the dest channels are expected to be of - // proper buffer size. If we still see it blocked, then - // the controller is thought to be in an abnormal state. - dst <- evt +// Channel creates a ChannelSource with the given buffer size. +func Channel(broadcaster *internal.ChannelBroadcaster, eventhandler handler.EventHandler, options ...ChannelOption) Source { + opts := internal.ChannelOptions{ + // 1024 is the default number of event notifications that can be buffered. + DestBufferSize: 1024, } -} - -func (cs *Channel) syncLoop(ctx context.Context) { - for { - select { - case <-ctx.Done(): - // Close destination channels - cs.doStop() - return - case evt, stillOpen := <-cs.Source: - if !stillOpen { - // if the source channel is closed, we're never gonna get - // anything more on it, so stop & bail - cs.doStop() - return - } - cs.distribute(evt) + for _, o := range options { + if o == nil { + continue // ignore nil options } - } -} - -// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Informer struct { - // Informer is the controller-runtime Informer - Informer cache.Informer -} - -var _ Source = &Informer{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. 
-func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Informer should have been specified by the user. - if is.Informer == nil { - return fmt.Errorf("must specify Informer.Informer") + o(&opts) } - _, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) - if err != nil { - return err - } - return nil + return &internal.Channel{Options: opts, Broadcaster: broadcaster, EventHandler: eventhandler} } -func (is *Informer) String() string { - return fmt.Sprintf("informer source: %p", is.Informer) +// Informer creates an InformerSource with the given cache provider. +func Informer(informer cache.Informer, eventhandler handler.EventHandler) Source { + return &internal.Informer{Informer: informer, EventHandler: eventhandler} } -var _ Source = Func(nil) +// StartFunc is a function that starts a goroutine and returns immediately. +type StartFunc = internal.StartFunc -// Func is a function that implements Source. -type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +// FuncOption is a functional option for configuring a Func source. +type FuncOption func(*internal.FuncOptions) -// Start implements Source. -func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, - pr ...predicate.Predicate) error { - return f(ctx, evt, queue, pr...) +// WithShutdownFunc specifies the ShutdownFunc of the Func source. +func WithShutdownFunc(fn func() error) FuncOption { + return func(o *internal.FuncOptions) { + o.ShutdownFunc = fn + } } -func (f Func) String() string { - return fmt.Sprintf("func source: %p", f) +// Func creates a FuncSource with the given function. 
+func Func(f StartFunc, options ...FuncOption) Source { + opts := internal.FuncOptions{} + for _, o := range options { + if o == nil { + continue // ignore nil options + } + o(&opts) + } + return &internal.FuncSource{StartFunc: f, Options: opts} } diff --git a/pkg/source/source_integration_test.go b/pkg/source/source_integration_test.go index 594d3c9a9c..6d6515d8aa 100644 --- a/pkg/source/source_integration_test.go +++ b/pkg/source/source_integration_test.go @@ -37,7 +37,6 @@ import ( ) var _ = Describe("Source", func() { - var instance1, instance2 source.Source var obj client.Object var q workqueue.RateLimitingInterface var c1, c2 chan interface{} @@ -58,11 +57,6 @@ var _ = Describe("Source", func() { c2 = make(chan interface{}) }) - JustBeforeEach(func() { - instance1 = source.Kind(icache, obj) - instance2 = source.Kind(icache, obj) - }) - AfterEach(func() { err := clientset.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -101,31 +95,33 @@ var _ = Describe("Source", func() { } // Create an event handler to verify the events - newHandler := func(c chan interface{}) handler.Funcs { - return handler.Funcs{ - CreateFunc: func(ctx context.Context, evt event.CreateEvent, rli workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(rli).To(Equal(q)) - c <- evt - }, - UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, rli workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(rli).To(Equal(q)) - c <- evt - }, - DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, rli workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(rli).To(Equal(q)) - c <- evt + newSource := func(c chan interface{}) source.Source { + return source.Kind( + icache, + obj, + handler.Funcs{ + CreateFunc: func(ctx context.Context, evt event.CreateEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, + UpdateFunc: func(ctx context.Context, 
evt event.UpdateEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, + DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, }, - } + ) } - handler1 := newHandler(c1) - handler2 := newHandler(c2) // Create 2 instances - Expect(instance1.Start(ctx, handler1, q)).To(Succeed()) - Expect(instance2.Start(ctx, handler2, q)).To(Succeed()) + Expect(newSource(c1).Start(ctx, q)).To(Succeed()) + Expect(newSource(c2).Start(ctx, q)).To(Succeed()) By("Creating a Deployment and expecting the CreateEvent.") created, err = client.Create(ctx, deployment, metav1.CreateOptions{}) @@ -241,8 +237,7 @@ var _ = Describe("Source", func() { c := make(chan struct{}) q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err := instance.Start(ctx, handler.Funcs{ + instance := source.Informer(depInformer, handler.Funcs{ CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { defer GinkgoRecover() var err error @@ -265,7 +260,8 @@ var _ = Describe("Source", func() { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) _, err = clientset.AppsV1().ReplicaSets("default").Create(ctx, rs, metav1.CreateOptions{}) @@ -282,8 +278,7 @@ var _ = Describe("Source", func() { rs2.SetLabels(map[string]string{"biz": "baz"}) q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err = instance.Start(ctx, handler.Funcs{ + instance := source.Informer(depInformer, handler.Funcs{ CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { }, UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, 
q2 workqueue.RateLimitingInterface) { @@ -307,7 +302,8 @@ var _ = Describe("Source", func() { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err = instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) _, err = clientset.AppsV1().ReplicaSets("default").Update(ctx, rs2, metav1.UpdateOptions{}) @@ -319,8 +315,7 @@ var _ = Describe("Source", func() { c := make(chan struct{}) q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err := instance.Start(ctx, handler.Funcs{ + instance := source.Informer(depInformer, handler.Funcs{ CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { }, UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { @@ -335,7 +330,8 @@ var _ = Describe("Source", func() { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) err = clientset.AppsV1().ReplicaSets("default").Delete(ctx, rs.Name, metav1.DeleteOptions{}) diff --git a/pkg/source/source_test.go b/pkg/source/source_test.go index 16c365e8a2..da88683f96 100644 --- a/pkg/source/source_test.go +++ b/pkg/source/source_test.go @@ -25,6 +25,7 @@ import ( . 
"github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -36,12 +37,63 @@ import ( ) var _ = Describe("Source", func() { + Describe("Informer", func() { + var inf *controllertest.FakeInformer + var ctx context.Context + var cancel context.CancelFunc + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + inf = &controllertest.FakeInformer{} + }) + + AfterEach(func() { + cancel() + }) + + Context("should error", func() { + It("if no Informer specified", func() { + instance := source.Informer(nil, nil) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must create Informer with a non-nil Informer")) + }) + + It("when Start is called twice", func() { + instance := source.Informer(inf, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot start an already started Informer source")) + }) + }) + + Context("should not panic", func() { + It("when Shutdown is called before Start", func() { + instance := source.Informer(inf, nil) + err := instance.Shutdown() + Expect(err).NotTo(HaveOccurred()) + }) + + It("when Shutdown is called twice", func() { + instance := source.Informer(inf, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + cancel() + Expect(instance.Shutdown()).To(Succeed()) + Expect(instance.Shutdown()).To(Succeed()) + }) + }) + }) + Describe("Kind", func() { var c chan struct{} var p *corev1.Pod var ic *informertest.FakeInformers + var ctx context.Context + var cancel context.CancelFunc BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) ic = 
&informertest.FakeInformers{} c = make(chan struct{}) p = &corev1.Pod{ @@ -53,6 +105,10 @@ var _ = Describe("Source", func() { } }) + AfterEach(func() { + cancel() + }) + Context("for a Pod resource", func() { It("should provide a Pod CreateEvent", func() { c := make(chan struct{}) @@ -65,8 +121,7 @@ var _ = Describe("Source", func() { } q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := source.Kind(ic, &corev1.Pod{}) - err := instance.Start(ctx, handler.Funcs{ + instance := source.Kind(ic, &corev1.Pod{}, handler.Funcs{ CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { defer GinkgoRecover() Expect(q2).To(Equal(q)) @@ -85,7 +140,8 @@ var _ = Describe("Source", func() { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) @@ -102,8 +158,7 @@ var _ = Describe("Source", func() { ic := &informertest.FakeInformers{} q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := source.Kind(ic, &corev1.Pod{}) - err := instance.Start(ctx, handler.Funcs{ + instance := source.Kind(ic, &corev1.Pod{}, handler.Funcs{ CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { defer GinkgoRecover() Fail("Unexpected CreateEvent") @@ -125,7 +180,8 @@ var _ = Describe("Source", func() { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) @@ -147,8 +203,7 @@ var _ = Describe("Source", func() { } q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := source.Kind(ic, &corev1.Pod{}) - err := instance.Start(ctx, handler.Funcs{ + instance := 
source.Kind(ic, &corev1.Pod{}, handler.Funcs{
 				CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) {
 					defer GinkgoRecover()
 					Fail("Unexpected DeleteEvent")
@@ -167,7 +222,8 @@ var _ = Describe("Source", func() {
 					defer GinkgoRecover()
 					Fail("Unexpected GenericEvent")
 				},
-			}, q)
+			})
+			err := instance.Start(ctx, q)
 			Expect(err).NotTo(HaveOccurred())
 
 			Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred())
@@ -179,27 +235,36 @@ var _ = Describe("Source", func() {
 			})
 		})
 
+		It("should return an error from Start if eventhandler was not provided", func() {
+			instance := source.Kind(ic, &corev1.Pod{}, nil)
+			err := instance.Start(ctx, nil)
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil EventHandler"))
+		})
+
 		It("should return an error from Start cache was not provided", func() {
-			instance := source.Kind(nil, &corev1.Pod{})
-			err := instance.Start(ctx, nil, nil)
+			instance := source.Kind(nil, &corev1.Pod{}, &handler.EnqueueRequestForObject{})
+			err := instance.Start(ctx, nil)
 			Expect(err).To(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil cache"))
+			Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil Cache"))
 		})
 
 		It("should return an error from Start if a type was not provided", func() {
-			instance := source.Kind(ic, nil)
-			err := instance.Start(ctx, nil, nil)
+			instance := source.Kind(ic, nil, &handler.EnqueueRequestForObject{})
+			err := instance.Start(ctx, nil)
 			Expect(err).To(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil object"))
+			Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil Type"))
 		})
 
 		It("should return an error if syncing fails", func() {
 			f := false
-			instance := source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{})
-			Expect(instance.Start(context.Background(), nil, nil)).NotTo(HaveOccurred())
-			err := 
instance.WaitForSync(context.Background()) + instance := source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(context.Background(), nil)).NotTo(HaveOccurred()) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := instance.WaitForSync(timeoutCtx) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("cache did not sync")) + Expect(err.Error()).To(Equal("informer did not sync in time: context deadline exceeded")) }) @@ -211,45 +276,81 @@ var _ = Describe("Source", func() { ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() - instance := source.Kind(ic, &corev1.Pod{}) - err := instance.Start(ctx, handler.Funcs{}, q) + instance := source.Kind(ic, &corev1.Pod{}, handler.Funcs{}) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) - Eventually(instance.WaitForSync).WithArguments(context.Background()).Should(HaveOccurred()) + Eventually(instance.WaitForSync).WithArguments(ctx).Should(HaveOccurred()) }) }) It("should return an error if syncing fails", func() { f := false - instance := source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}) - Expect(instance.Start(context.Background(), nil, nil)).NotTo(HaveOccurred()) - err := instance.WaitForSync(context.Background()) + instance := source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(context.Background(), nil)).NotTo(HaveOccurred()) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := instance.WaitForSync(timeoutCtx) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("cache did not sync")) + Expect(err.Error()).To(Equal("informer did not sync in time: context deadline exceeded")) + + }) + + Context("should error", func() { + It("if no cache specified", func() { + instance := source.Kind(nil, &corev1.Pod{}, 
nil) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil Cache")) + }) + + It("if no type specified", func() { + instance := source.Kind(ic, nil, nil) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil Type")) + }) + + It("when Start is called twice", func() { + instance := source.Kind(ic, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot start an already started Kind source")) + }) + }) + + Context("should not panic", func() { + It("when Shutdown is called before Start", func() { + instance := source.Kind(ic, &corev1.Pod{}, nil) + err := instance.Shutdown() + Expect(err).NotTo(HaveOccurred()) + }) + It("when Shutdown is called twice", func() { + instance := source.Kind(ic, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + cancel() + Expect(instance.Shutdown()).To(Succeed()) + Expect(instance.Shutdown()).To(Succeed()) + }) }) }) Describe("Func", func() { It("should be called from Start", func() { run := false - instance := source.Func(func( - context.Context, - handler.EventHandler, - workqueue.RateLimitingInterface, ...predicate.Predicate) error { + instance := source.Func(func(context.Context, workqueue.RateLimitingInterface) error { run = true return nil }) - Expect(instance.Start(ctx, nil, nil)).NotTo(HaveOccurred()) + Expect(instance.Start(ctx, nil)).NotTo(HaveOccurred()) Expect(run).To(BeTrue()) expected := fmt.Errorf("expected error: Func") - instance = source.Func(func( - context.Context, - handler.EventHandler, - workqueue.RateLimitingInterface, ...predicate.Predicate) error { + instance = source.Func(func(context.Context, workqueue.RateLimitingInterface) error { 
return expected }) - Expect(instance.Start(ctx, nil, nil)).To(Equal(expected)) + Expect(instance.Start(ctx, nil)).To(Equal(expected)) }) }) @@ -289,29 +390,35 @@ var _ = Describe("Source", func() { } q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{Source: ch} - err := instance.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - // The empty event should have been filtered out by the predicates, - // and will not be passed to the handler. - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Object).To(Equal(p)) - close(c) - }, - }, q, prct) + instance := source.Channel( + source.NewChannelBroadcaster(ch), + handler.WithPredicates( + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + // The empty event should have been filtered out by the predicates, + // and will not be passed to the handler. 
+ Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + close(c) + }, + }, + prct, + ), + ) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) ch <- invalidEvt @@ -327,34 +434,37 @@ var _ = Describe("Source", func() { q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") // Add a handler to get distribution blocked - instance := &source.Channel{Source: ch} - instance.DestBufferSize = 1 - err := instance.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - // Block for the first time - if eventCount == 0 { - <-unblock - } - eventCount++ - - if eventCount == 3 { - close(processed) - } + instance := source.Channel( + source.NewChannelBroadcaster(ch), + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + // Block for the first time + if eventCount == 0 { + <-unblock + } + eventCount++ + + if eventCount == 3 { + 
close(processed) + } + }, }, - }, q) + source.WithDestBufferSize(1), + ) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) // Write 3 events into the source channel. @@ -383,28 +493,31 @@ var _ = Describe("Source", func() { q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") // Add a handler to get distribution blocked - instance := &source.Channel{Source: ch} - instance.DestBufferSize = 1 + instance := source.Channel( + source.NewChannelBroadcaster(ch), + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() - err := instance.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") + close(processed) + }, }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() + source.WithDestBufferSize(1), + ) - close(processed) - }, - }, q) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) <-processed @@ -421,42 +534,46 @@ var _ = Describe("Source", func() { ch <- evt close(ch) - 
By("feeding that channel to a channel source") - src := &source.Channel{Source: ch} - processed := make(chan struct{}) defer close(processed) - err := src.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() + By("feeding that channel to a channel source") + src := source.Channel( + source.NewChannelBroadcaster(ch), + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() - processed <- struct{}{} + processed <- struct{}{} + }, }, - }, q) + ) + + err := src.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) By("expecting to only get one event") Eventually(processed).Should(Receive()) Consistently(processed).ShouldNot(Receive()) }) + It("should get error if no source specified", func() { q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{ /*no source specified*/ } - err := instance.Start(ctx, handler.Funcs{}, q) - 
Expect(err).To(Equal(fmt.Errorf("must specify Channel.Source"))) + instance := source.Channel(nil, handler.Funcs{}) + err := instance.Start(ctx, q) + Expect(err).To(Equal(fmt.Errorf("must create Channel with a non-nil Broadcaster"))) }) }) Context("for multi sources (handlers)", func() { @@ -469,65 +586,87 @@ var _ = Describe("Source", func() { Object: p, } - var resEvent1, resEvent2 event.GenericEvent - c1 := make(chan struct{}) - c2 := make(chan struct{}) - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{Source: ch} - err := instance.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Object).To(Equal(p)) - resEvent1 = evt - close(c1) - }, - }, q) + channelSource := source.NewChannelBroadcaster(ch) + + createInstance := func(c chan event.GenericEvent) source.Source { + return source.Channel( + channelSource, + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx 
context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + c <- evt + }, + }, + ) + } + + c1 := make(chan event.GenericEvent) + c2 := make(chan event.GenericEvent) + + err := createInstance(c1).Start(ctx, q) Expect(err).NotTo(HaveOccurred()) - err = instance.Start(ctx, handler.Funcs{ - CreateFunc: func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Object).To(Equal(p)) - resEvent2 = evt - close(c2) - }, - }, q) + err = createInstance(c2).Start(ctx, q) Expect(err).NotTo(HaveOccurred()) ch <- evt - <-c1 - <-c2 + resEvent1 := <-c1 + resEvent2 := <-c2 // Validate the two handlers received same event Expect(resEvent1).To(Equal(resEvent2)) }) }) + + Context("should error", func() { + It("if no source specified", func() { + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := source.Channel(nil, handler.Funcs{}) + err := instance.Start(ctx, q) + Expect(err).To(Equal(fmt.Errorf("must create Channel with a non-nil Broadcaster"))) + }) + + It("when Start is called twice", func() { + ch := make(chan event.GenericEvent) + instance := source.Channel(source.NewChannelBroadcaster(ch), &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("cannot start an already started Channel source")) + }) + }) + + Context("should not panic", func() { + It("when Shutdown is called before Start", func() { + ch := make(chan event.GenericEvent) + instance := source.Channel(source.NewChannelBroadcaster(ch), nil) + err := instance.Shutdown() + Expect(err).NotTo(HaveOccurred()) + }) + + It("when Shutdown is called twice", func() { + ch := make(chan event.GenericEvent) + instance := source.Channel(source.NewChannelBroadcaster(ch), &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).To(Succeed()) + cancel() + Expect(instance.Shutdown()).To(Succeed()) + Expect(instance.Shutdown()).To(Succeed()) + }) + }) }) })