From b9ab220a5f0d333575f41f95fd418339e678506e Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Sun, 11 Sep 2022 18:18:08 +0000 Subject: [PATCH 01/28] Building up apf for kcp --- .../common/delegating_event_handler.go | 123 ++++++++++++ pkg/reconciler/common/delegating_informer.go | 28 +++ .../common/scoped_generic_informer.go | 149 ++++++++++++++ pkg/reconciler/common/util.go | 25 +++ pkg/reconciler/kubeapf/kube_apf.go | 169 ++++++++++++++++ .../kubeapf/scoped_shared_informer_factory.go | 188 ++++++++++++++++++ .../kubeapf/single_cluster_flow_control.go | 181 +++++++++++++++++ pkg/server/config.go | 14 ++ 8 files changed, 877 insertions(+) create mode 100644 pkg/reconciler/common/delegating_event_handler.go create mode 100644 pkg/reconciler/common/delegating_informer.go create mode 100644 pkg/reconciler/common/scoped_generic_informer.go create mode 100644 pkg/reconciler/common/util.go create mode 100644 pkg/reconciler/kubeapf/kube_apf.go create mode 100644 pkg/reconciler/kubeapf/scoped_shared_informer_factory.go create mode 100644 pkg/reconciler/kubeapf/single_cluster_flow_control.go diff --git a/pkg/reconciler/common/delegating_event_handler.go b/pkg/reconciler/common/delegating_event_handler.go new file mode 100644 index 00000000000..fb2b4b39f92 --- /dev/null +++ b/pkg/reconciler/common/delegating_event_handler.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "sync" + "time" + + "github.com/kcp-dev/logicalcluster/v2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// DelegatingEventHandler multiplexes event handlers for multiple resource types and logical clusters. +type DelegatingEventHandler struct { + lock sync.RWMutex + eventHandlers map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler +} + +// NewDelegatingEventHandler returns a new delegatingEventHandler. +func NewDelegatingEventHandler() *DelegatingEventHandler { + return &DelegatingEventHandler{ + eventHandlers: map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler{}, + } +} + +// registerEventHandler registers an event handler, h, to receive events for the given resource/informer scoped only to +// clusterName. +func (d *DelegatingEventHandler) registerEventHandler( + resource schema.GroupResource, + informer cache.SharedIndexInformer, + clusterName logicalcluster.Name, + h cache.ResourceEventHandler, +) { + d.lock.Lock() + defer d.lock.Unlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} + d.eventHandlers[resource] = groupResourceHandlers + + informer.AddEventHandler(d.resourceEventHandlerFuncs(resource)) + } + + groupResourceHandlers[clusterName] = h +} + +// registerEventHandlerWithResyncPeriod registers an event handler, h, to receive events for the given resource/informer +// scoped only to clusterName, with the given resync period. 
+func (d *DelegatingEventHandler) registerEventHandlerWithResyncPeriod( + resource schema.GroupResource, + informer cache.SharedIndexInformer, + clusterName logicalcluster.Name, + h cache.ResourceEventHandler, + resyncPeriod time.Duration, +) { + d.lock.Lock() + defer d.lock.Unlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} + d.eventHandlers[resource] = groupResourceHandlers + + informer.AddEventHandlerWithResyncPeriod(d.resourceEventHandlerFuncs(resource), resyncPeriod) + } + + groupResourceHandlers[clusterName] = h +} + +func (d *DelegatingEventHandler) resourceEventHandlerFuncs(resource schema.GroupResource) cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if h := d.getEventHandler(resource, obj); h != nil { + h.OnAdd(obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + if h := d.getEventHandler(resource, oldObj); h != nil { + h.OnUpdate(oldObj, newObj) + } + }, + DeleteFunc: func(obj interface{}) { + if h := d.getEventHandler(resource, obj); h != nil { + h.OnDelete(obj) + } + }, + } +} + +// getEventHandler returns a cache.ResourceEventHandler for resource and the logicalcluster.Name for obj. 
+func (d *DelegatingEventHandler) getEventHandler(resource schema.GroupResource, obj interface{}) cache.ResourceEventHandler { + clusterName := ClusterNameForObj(obj) + if clusterName.Empty() { + return nil + } + + d.lock.RLock() + defer d.lock.RUnlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + return nil + } + + return groupResourceHandlers[clusterName] +} diff --git a/pkg/reconciler/common/delegating_informer.go b/pkg/reconciler/common/delegating_informer.go new file mode 100644 index 00000000000..3875b45f8d2 --- /dev/null +++ b/pkg/reconciler/common/delegating_informer.go @@ -0,0 +1,28 @@ +package common + +import ( + "time" + + "github.com/kcp-dev/logicalcluster/v2" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// DelegatingInformer embeds a cache.SharedIndexInformer, delegating adding event handlers to +// registerEventHandlerForCluster. +type DelegatingInformer struct { + ClusterName logicalcluster.Name + Resource schema.GroupResource + cache.SharedIndexInformer + DelegatingEventHandler *DelegatingEventHandler +} + +// AddEventHandler registers with the delegating event handler. +func (d *DelegatingInformer) AddEventHandler(handler cache.ResourceEventHandler) { + d.DelegatingEventHandler.registerEventHandler(d.Resource, d.SharedIndexInformer, d.ClusterName, handler) +} + +// AddEventHandlerWithResyncPeriod registers with the delegating event handler with the given resync period. 
+func (d *DelegatingInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + d.DelegatingEventHandler.registerEventHandlerWithResyncPeriod(d.Resource, d.SharedIndexInformer, d.ClusterName, handler, resyncPeriod) +} diff --git a/pkg/reconciler/common/scoped_generic_informer.go b/pkg/reconciler/common/scoped_generic_informer.go new file mode 100644 index 00000000000..7588d7bf0e8 --- /dev/null +++ b/pkg/reconciler/common/scoped_generic_informer.go @@ -0,0 +1,149 @@ +package common + +import ( + "github.com/kcp-dev/kcp/pkg/indexers" + "github.com/kcp-dev/logicalcluster/v2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" +) + +// scopedGenericInformer wraps an informers.GenericInformer and produces instances of cache.GenericLister that are +// scoped to a single logical cluster. +type scopedGenericInformer struct { + delegate informers.GenericInformer + clusterName logicalcluster.Name + resource schema.GroupResource + delegatingEventHandler *DelegatingEventHandler +} + +// Informer invokes Informer() on the underlying informers.GenericInformer. +func (s *scopedGenericInformer) Informer() cache.SharedIndexInformer { + return &DelegatingInformer{ + ClusterName: s.clusterName, + SharedIndexInformer: s.delegate.Informer(), + DelegatingEventHandler: s.delegatingEventHandler, + } +} + +// Lister returns an implementation of cache.GenericLister that is scoped to a single logical cluster. 
+func (s *scopedGenericInformer) Lister() cache.GenericLister { + return &scopedGenericLister{ + indexer: s.delegate.Informer().GetIndexer(), + clusterName: s.clusterName, + resource: s.resource, + } +} + +// scopedGenericLister wraps a cache.Indexer to implement a cache.GenericLister that is scoped to a single logical +// cluster. +type scopedGenericLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name + resource schema.GroupResource +} + +// List returns all instances from the cache.Indexer scoped to a single logical cluster and matching selector. +func (s *scopedGenericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + ret = append(ret, obj.(runtime.Object)) + }) + return ret, err +} + +// ByNamespace returns an implementation of cache.GenericNamespaceLister that is scoped to a single logical cluster. +func (s *scopedGenericLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &scopedGenericNamespaceLister{ + indexer: s.indexer, + clusterName: s.clusterName, + namespace: namespace, + resource: s.resource, + } +} + +// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster. 
+func (s *scopedGenericLister) Get(name string) (runtime.Object, error) { + key := clusters.ToClusterAwareKey(s.clusterName, name) + obj, exists, err := s.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +func ListByIndex(indexer cache.Indexer, indexName, indexValue string, selector labels.Selector, appendFunc func(obj interface{})) error { + selectAll := selector == nil || selector.Empty() + + list, err := indexer.ByIndex(indexName, indexValue) + if err != nil { + return err + } + + for i := range list { + if selectAll { + appendFunc(list[i]) + continue + } + + metadata, err := meta.Accessor(list[i]) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFunc(list[i]) + } + } + + return nil +} + +// scopedGenericNamespaceLister wraps a cache.Indexer to implement a cache.GenericNamespaceLister that is scoped to a +// single logical cluster. +type scopedGenericNamespaceLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name + namespace string + resource schema.GroupResource +} + +// List lists all instances from the cache.Indexer scoped to a single logical cluster and namespace, and matching +// selector. +func (s *scopedGenericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + // To support e.g. quota for cluster-scoped resources, we've hacked the k8s quota to use namespace="" when + // checking quota for cluster-scoped resources. But because all the upstream quota code is written to only + // support namespace-scoped resources, we have to hack the "namespace lister" to support returning all items + // when its namespace is "". 
+ var indexName, indexValue string + if s.namespace == "" { + indexName = indexers.ByLogicalCluster + indexValue = s.clusterName.String() + } else { + indexName = indexers.ByLogicalClusterAndNamespace + indexValue = clusters.ToClusterAwareKey(s.clusterName, s.namespace) + } + err = ListByIndex(s.indexer, indexName, indexValue, selector, func(obj interface{}) { + ret = append(ret, obj.(runtime.Object)) + }) + return ret, err +} + +// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster and +// namespace. +func (s *scopedGenericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/pkg/reconciler/common/util.go b/pkg/reconciler/common/util.go new file mode 100644 index 00000000000..c0dd3defd51 --- /dev/null +++ b/pkg/reconciler/common/util.go @@ -0,0 +1,25 @@ +package common + +import ( + "github.com/kcp-dev/logicalcluster/v2" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" +) + +func ClusterNameForObj(obj interface{}) logicalcluster.Name { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return logicalcluster.Name{} + } + + _, clusterAndName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + utilruntime.HandleError(err) + return logicalcluster.Name{} + } + + cluster, _ := clusters.SplitClusterAwareKey(clusterAndName) + return cluster +} diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go new file mode 100644 index 00000000000..8e5d6830b45 --- /dev/null +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -0,0 +1,169 @@ +package kubeapf + +import ( + "context" + "net/http" + "sync" + "time" + + 
"github.com/kcp-dev/logicalcluster/v2" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + flowcontrol "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/apiserver/pkg/endpoints/request" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/mux" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" +) + +// KubeApfDelegator implements k8s APF controller interface +// it is cluster-aware and manages the life cycles of +// cluster-specific APF controller instances and delegates +// requests from the handler chain to them +type KubeApfDelegator struct { + // scopingInformerFactory is not cluster scoped but can be made cluster scoped + scopingSharedInformerFactory *scopingSharedInformerFactory + // scopingGenericSharedInformerFactory *kubequota.ScopingGenericSharedInformerFactory + kubeCluster kubernetes.ClusterInterface + + // flowcontrolClient should be cluster scoped + flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface + + // for now assume these are globl configurations for all logical clusters to inherit + serverConcurrencyLimit int + requestWaitLimit time.Duration + + // delegates stores the references to cluster scoped apf filters + delegates map[logicalcluster.Name]utilflowcontrol.Interface + + lock sync.RWMutex +} + +// Make sure utilflowcontrol.Interface is implemented +var _ utilflowcontrol.Interface = &KubeApfDelegator{} +var defaultCluster logicalcluster.Name = logicalcluster.Wildcard + +// NewKubeApfDelegator +func NewKubeApfDelegator( + informerFactory kubeinformers.SharedInformerFactory, + kubeCluster kubernetes.ClusterInterface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, +) *KubeApfDelegator { + newFilter := &KubeApfDelegator{ + 
scopingSharedInformerFactory: newScopingSharedInformerFactory(informerFactory), // not cluster scoped + kubeCluster: kubeCluster, // can be made cluster scoped + serverConcurrencyLimit: serverConcurrencyLimit, + requestWaitLimit: requestWaitLimit, + delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, + } + + // start APF on the default Cluster + flowcontrolClient := kubeCluster.Cluster(defaultCluster).FlowcontrolV1beta2() + scopedSharedInformerFactory := newFilter.scopingSharedInformerFactory.ForCluster(defaultCluster) + newFilter.delegates[defaultCluster] = utilflowcontrol.New(scopedSharedInformerFactory, flowcontrolClient, serverConcurrencyLimit, requestWaitLimit) + + return newFilter +} + +// Handle implements flowcontrol.Interface +func (k *KubeApfDelegator) Handle(ctx context.Context, + requestDigest utilflowcontrol.RequestDigest, + noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string), + workEstimator func() fcrequest.WorkEstimate, + queueNoteFn fq.QueueNoteFn, + execFn func(), +) { + // TODO: missing error handling + cluster, _ := genericapirequest.ValidClusterFrom(ctx) + klog.V(3).InfoS("KubeApfFilter Handle request for cluster ", "clusterName", cluster.Name) + + delegate, _ := k.getOrCreateDelegate(cluster.Name) + + // k.delegates[defaultCluster].Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) + delegate.Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) +} + +// GetInterestedWatchCount implements flowcontrol.Interface +func (k *KubeApfDelegator) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { + return k.delegates[defaultCluster].GetInterestedWatchCount(requestInfo) +} + +// RegisterWatch implements flowcontrol.Interface +func (k *KubeApfDelegator) RegisterWatch(r *http.Request) utilflowcontrol.ForgetWatchFunc { + return k.delegates[defaultCluster].RegisterWatch(r) +} + +// Install implements flowcontrol.Interface +func (k 
*KubeApfDelegator) Install(c *mux.PathRecorderMux) { + k.delegates[defaultCluster].Install(c) +} + +// MaintainObservations implements flowcontrol.Interface +func (k *KubeApfDelegator) MaintainObservations(stopCh <-chan struct{}) { + k.delegates[defaultCluster].MaintainObservations(stopCh) +} + +// Run implements flowcontrol.Interface +// it starts kube apf controller +func (k *KubeApfDelegator) Run(stopCh <-chan struct{}) error { + // TODO: start ClusterWorkspaceDeletionMonitor + + // Run kube apf controller. Cluster specific apf controller will be created + // on demand + return k.delegates[defaultCluster].Run(stopCh) +} + +// getOrCreateDelegate creates a utilflowcontrol.Interface (apf filter) for clusterName. +func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) (utilflowcontrol.Interface, error) { + k.lock.RLock() + delegate := k.delegates[clusterName] + k.lock.RUnlock() + + if delegate != nil { + return delegate, nil + } + + k.lock.Lock() + defer k.lock.Unlock() + + delegate = k.delegates[clusterName] + if delegate != nil { + return delegate, nil + } + + // // Set up a context that is cancelable and that is bounded by k.serverDone + // ctx, cancel := context.WithCancel(context.Background()) + // go func() { + // // Wait for either the context or the server to be done. If it's the server, cancel the context. 
+ // select { + // case <-ctx.Done(): + // case <-k.serverDone: + // cancel() + // } + // }() + + // TODO: new delegate should use cluster scoped informer factory and flowcontrol clients + scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(defaultCluster) + flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() + delegate = utilflowcontrol.New( + scopedInformerFactory, + flowcontrolClient, + k.serverConcurrencyLimit, + k.requestWaitLimit, + ) + + k.delegates[clusterName] = delegate + // TODO: implement stop channel mechanism + stopCh := make(chan struct{}) + // Start cluster scoped apf controller + go delegate.Run(stopCh) + + klog.V(3).InfoS("Starting new apf controller for cluster", "clusterName", clusterName) + return delegate, nil +} diff --git a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go new file mode 100644 index 00000000000..e09cb9587cd --- /dev/null +++ b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go @@ -0,0 +1,188 @@ +package kubeapf + +import ( + reflect "reflect" + "sync" + + "github.com/kcp-dev/logicalcluster/v2" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + admissionregistration "k8s.io/client-go/informers/admissionregistration" + apiserverinternal "k8s.io/client-go/informers/apiserverinternal" + apps "k8s.io/client-go/informers/apps" + autoscaling "k8s.io/client-go/informers/autoscaling" + batch "k8s.io/client-go/informers/batch" + certificates "k8s.io/client-go/informers/certificates" + coordination "k8s.io/client-go/informers/coordination" + core "k8s.io/client-go/informers/core" + discovery "k8s.io/client-go/informers/discovery" + events "k8s.io/client-go/informers/events" + extensions "k8s.io/client-go/informers/extensions" + flowcontrol "k8s.io/client-go/informers/flowcontrol" + "k8s.io/client-go/informers/internalinterfaces" + networking 
"k8s.io/client-go/informers/networking" + node "k8s.io/client-go/informers/node" + policy "k8s.io/client-go/informers/policy" + rbac "k8s.io/client-go/informers/rbac" + scheduling "k8s.io/client-go/informers/scheduling" + storage "k8s.io/client-go/informers/storage" + cache "k8s.io/client-go/tools/cache" +) + +// scopingSharedInformerFactory is used to create scopedSharedInformerFactory that +// targets a specific logical cluster +type scopingSharedInformerFactory struct { + factory informers.SharedInformerFactory +} + +func newScopingSharedInformerFactory(factory informers.SharedInformerFactory) *scopingSharedInformerFactory { + return &scopingSharedInformerFactory{factory: factory} +} + +// ForCluster returns a scopedSharedInformerFactory that works on a specific logical cluster +func (f *scopingSharedInformerFactory) ForCluster(clusterName logicalcluster.Name) *scopedSharedInformerFactory { + return &scopedSharedInformerFactory{ + delegate: f.factory, + clusterName: clusterName, + } +} + +// scopedSharedInformerFactory can create informers (currently limited to Flowcontrol) +// that are for a single cluster +type scopedSharedInformerFactory struct { + lock sync.RWMutex + delegate informers.SharedInformerFactory + clusterName logicalcluster.Name +} + +var _ informers.SharedInformerFactory = &scopedSharedInformerFactory{} + +// ExtraClusterScopedIndexers implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) ExtraClusterScopedIndexers() cache.Indexers { + panic("unimplemented") +} + +// ExtraNamespaceScopedIndexers implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) ExtraNamespaceScopedIndexers() cache.Indexers { + panic("unimplemented") +} + +// InformerFor implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + panic("unimplemented") +} + +// KeyFunction implements 
informers.SharedInformerFactory +func (*scopedSharedInformerFactory) KeyFunction() cache.KeyFunc { + panic("unimplemented") +} + +// Start implements informers.SharedInformerFactory +func (f *scopedSharedInformerFactory) Start(stopCh <-chan struct{}) { + f.delegate.Start(stopCh) +} + +// Admissionregistration implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Admissionregistration() admissionregistration.Interface { + panic("unimplemented") +} + +// Apps implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Apps() apps.Interface { + panic("unimplemented") +} + +// Autoscaling implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Autoscaling() autoscaling.Interface { + panic("unimplemented") +} + +// Batch implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Batch() batch.Interface { + panic("unimplemented") +} + +// Certificates implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Certificates() certificates.Interface { + panic("unimplemented") +} + +// Coordination implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Coordination() coordination.Interface { + panic("unimplemented") +} + +// Core implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Core() core.Interface { + panic("unimplemented") +} + +// Discovery implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Discovery() discovery.Interface { + panic("unimplemented") +} + +// Events implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Events() events.Interface { + panic("unimplemented") +} + +// Extensions implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Extensions() extensions.Interface { + panic("unimplemented") +} + +// Flowcontrol returns an implementation of flowcontrol.Interface +// that is targeting a single cluster +func (f 
*scopedSharedInformerFactory) Flowcontrol() flowcontrol.Interface { + return &scopedFlowcontrol{ + clusterName: f.clusterName, + delegate: f.delegate.Flowcontrol(), + } +} + +// ForResource implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + panic("unimplemented") +} + +// Internal implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Internal() apiserverinternal.Interface { + panic("unimplemented") +} + +// Networking implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Networking() networking.Interface { + panic("unimplemented") +} + +// Node implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Node() node.Interface { + panic("unimplemented") +} + +// Policy implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Policy() policy.Interface { + panic("unimplemented") +} + +// Rbac implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Rbac() rbac.Interface { + panic("unimplemented") +} + +// Scheduling implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Scheduling() scheduling.Interface { + panic("unimplemented") +} + +// Storage implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) Storage() storage.Interface { + panic("unimplemented") +} + +// WaitForCacheSync implements informers.SharedInformerFactory +func (*scopedSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + panic("unimplemented") +} diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go new file mode 100644 index 00000000000..bf329021321 --- /dev/null +++ b/pkg/reconciler/kubeapf/single_cluster_flow_control.go @@ -0,0 +1,181 @@ +package kubeapf + +import ( + "github.com/kcp-dev/kcp/pkg/indexers" + 
"github.com/kcp-dev/kcp/pkg/reconciler/common" + "github.com/kcp-dev/logicalcluster/v2" + apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + flowcontrol "k8s.io/client-go/informers/flowcontrol" + v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" + v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" + flowcontrollisterv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + cache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" +) + +type scopedFlowcontrol struct { + clusterName logicalcluster.Name + delegate flowcontrol.Interface +} + +var _ flowcontrol.Interface = &scopedFlowcontrol{} + +// V1alpha1 implements flowcontrol.Interface +func (*scopedFlowcontrol) V1alpha1() v1alpha1.Interface { + panic("unimplemented") +} + +// V1beta1 implements flowcontrol.Interface +func (*scopedFlowcontrol) V1beta1() v1beta1.Interface { + panic("unimplemented") +} + +// V1beta2 implements flowcontrol.Interface +func (f *scopedFlowcontrol) V1beta2() v1beta2.Interface { + return &scopedFlowcontrolV1Beta2{ + clusterName: f.clusterName, + delegate: f.delegate.V1beta2(), + delegatingEventHandler: common.NewDelegatingEventHandler(), + } +} + +type scopedFlowcontrolV1Beta2 struct { + clusterName logicalcluster.Name + delegate v1beta2.Interface + delegatingEventHandler *common.DelegatingEventHandler +} + +var _ v1beta2.Interface = &scopedFlowcontrolV1Beta2{} + +// FlowSchemas implements v1beta2.Interface +func (f *scopedFlowcontrolV1Beta2) FlowSchemas() v1beta2.FlowSchemaInformer { + return &scopedFlowSchemaInformer{ + clusterName: f.clusterName, + delegate: f.delegate.FlowSchemas(), + delegatingEventHandler: f.delegatingEventHandler, + } +} + +// PriorityLevelConfigurations implements v1beta2.Interface +func (f *scopedFlowcontrolV1Beta2) PriorityLevelConfigurations() v1beta2.PriorityLevelConfigurationInformer { + return 
&scopedPriorityLevelConfigurationInformer{ + clusterName: f.clusterName, + delegate: f.delegate.PriorityLevelConfigurations(), + delegatingEventHandler: f.delegatingEventHandler, + } +} + +type scopedFlowSchemaInformer struct { + clusterName logicalcluster.Name + delegate v1beta2.FlowSchemaInformer + delegatingEventHandler *common.DelegatingEventHandler +} +type scopedPriorityLevelConfigurationInformer struct { + clusterName logicalcluster.Name + delegate v1beta2.PriorityLevelConfigurationInformer + delegatingEventHandler *common.DelegatingEventHandler +} + +var _ v1beta2.FlowSchemaInformer = &scopedFlowSchemaInformer{} +var _ v1beta2.PriorityLevelConfigurationInformer = &scopedPriorityLevelConfigurationInformer{} + +// Informer implements v1beta2.FlowSchemaInformer +func (s *scopedFlowSchemaInformer) Informer() cache.SharedIndexInformer { + return &common.DelegatingInformer{ + ClusterName: s.clusterName, + Resource: apiflowcontrolv1beta2.Resource("flowschema"), + SharedIndexInformer: s.delegate.Informer(), + DelegatingEventHandler: s.delegatingEventHandler, + } +} + +// Lister implements v1beta2.FlowSchemaInformer +func (s *scopedFlowSchemaInformer) Lister() flowcontrollisterv1beta2.FlowSchemaLister { + return &SingleClusterFlowSchemaLister{ + indexer: s.delegate.Informer().GetIndexer(), + clusterName: s.clusterName, + } +} + +// Informer implements v1beta2.PriorityLevelConfigurationInformer +func (s *scopedPriorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return &common.DelegatingInformer{ + ClusterName: s.clusterName, + Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfiguration"), + SharedIndexInformer: s.delegate.Informer(), + DelegatingEventHandler: s.delegatingEventHandler, + } +} + +// Lister implements v1beta2.PriorityLevelConfigurationInformer +func (s *scopedPriorityLevelConfigurationInformer) Lister() flowcontrollisterv1beta2.PriorityLevelConfigurationLister { + return &SingleClusterPriorityLevelConfigurationLister{ + 
indexer: s.delegate.Informer().GetIndexer(), + clusterName: s.clusterName, + } +} + +type SingleClusterFlowSchemaLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name +} + +var _ flowcontrollisterv1beta2.FlowSchemaLister = &SingleClusterFlowSchemaLister{} + +// Get implements v1beta2.FlowSchemaLister +func (s *SingleClusterFlowSchemaLister) Get(name string) (*apiflowcontrolv1beta2.FlowSchema, error) { + key := clusters.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way + obj, exists, err := s.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(apiflowcontrolv1beta2.Resource("flowschema"), name) + } + return obj.(*apiflowcontrolv1beta2.FlowSchema), nil +} + +// List implements v1beta2.FlowSchemaLister +func (s *SingleClusterFlowSchemaLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.FlowSchema, err error) { + if err := common.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + ret = append(ret, obj.(*apiflowcontrolv1beta2.FlowSchema)) + }); err != nil { + return nil, err + } + + return ret, nil +} + +type SingleClusterPriorityLevelConfigurationLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name +} + +var _ flowcontrollisterv1beta2.PriorityLevelConfigurationLister = &SingleClusterPriorityLevelConfigurationLister{} + +// Get implements v1beta2.PriorityLevelConfigurationLister +func (s *SingleClusterPriorityLevelConfigurationLister) Get(name string) (*apiflowcontrolv1beta2.PriorityLevelConfiguration, error) { + key := clusters.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way + obj, exists, err := s.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(apiflowcontrolv1beta2.Resource("prioritylevelconfiguration"), name) + } + return 
obj.(*apiflowcontrolv1beta2.PriorityLevelConfiguration), nil +} + +// List implements v1beta2.PriorityLevelConfigurationLister +func (s *SingleClusterPriorityLevelConfigurationLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.PriorityLevelConfiguration, err error) { + if err := common.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + ret = append(ret, obj.(*apiflowcontrolv1beta2.PriorityLevelConfiguration)) + }); err != nil { + return nil, err + } + + return ret, nil +} diff --git a/pkg/server/config.go b/pkg/server/config.go index 3c8eb272079..492b8b24f8c 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -62,10 +62,13 @@ import ( "github.com/kcp-dev/kcp/pkg/informer" "github.com/kcp-dev/kcp/pkg/server/bootstrap" kcpfilters "github.com/kcp-dev/kcp/pkg/server/filters" + "github.com/kcp-dev/kcp/pkg/reconciler/kubeapf" kcpserveroptions "github.com/kcp-dev/kcp/pkg/server/options" "github.com/kcp-dev/kcp/pkg/server/options/batteries" "github.com/kcp-dev/kcp/pkg/server/requestinfo" "github.com/kcp-dev/kcp/pkg/tunneler" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) type Config struct { @@ -216,6 +219,17 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { } } + // Call to BuildPriorityAndFairness in kcp instead of kube + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) && c.Options.GenericControlPlane.GenericServerRunOptions.EnablePriorityAndFairness { + c.GenericConfig.FlowControl = kubeapf.NewKubeApfDelegator( + c.KubeSharedInformerFactory, + c.KubeClusterClient, + 1600, + c.Options.GenericControlPlane.GenericServerRunOptions.RequestTimeout/4, + ) + + } + // Setup kcp * informers, but those will need the identities for the APIExports used to make the APIs available. 
// The identities are not known before we can get them from the APIExports via the loopback client or from the root shard in case this is a non-root shard, // hence we postpone this to getOrCreateKcpIdentities() in the kcp-start-informers post-start hook. From de047e9d3798b38d4445a996bde7422e8fdc36e7 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Mon, 12 Sep 2022 18:29:24 +0000 Subject: [PATCH 02/28] Fix bug in selecting delegate apf controller --- pkg/reconciler/kubeapf/kube_apf.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index 8e5d6830b45..93b63de5f24 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -66,6 +66,8 @@ func NewKubeApfDelegator( // start APF on the default Cluster flowcontrolClient := kubeCluster.Cluster(defaultCluster).FlowcontrolV1beta2() scopedSharedInformerFactory := newFilter.scopingSharedInformerFactory.ForCluster(defaultCluster) + + klog.V(3).InfoS("Creating new apf controller for cluster", "clusterName", defaultCluster) newFilter.delegates[defaultCluster] = utilflowcontrol.New(scopedSharedInformerFactory, flowcontrolClient, serverConcurrencyLimit, requestWaitLimit) return newFilter @@ -85,7 +87,6 @@ func (k *KubeApfDelegator) Handle(ctx context.Context, delegate, _ := k.getOrCreateDelegate(cluster.Name) - // k.delegates[defaultCluster].Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) delegate.Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) } @@ -149,7 +150,7 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) // }() // TODO: new delegate should use cluster scoped informer factory and flowcontrol clients - scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(defaultCluster) + scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(clusterName) 
flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() delegate = utilflowcontrol.New( scopedInformerFactory, @@ -164,6 +165,6 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) // Start cluster scoped apf controller go delegate.Run(stopCh) - klog.V(3).InfoS("Starting new apf controller for cluster", "clusterName", clusterName) + klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) return delegate, nil } From 7ca4663db0e98070299ed2a10ee1b5fe10613321 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 15 Sep 2022 19:10:15 +0000 Subject: [PATCH 03/28] Instantiate configController when receiving the first request --- pkg/reconciler/kubeapf/kube_apf.go | 65 ++++++++++++------------------ 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index 93b63de5f24..e67ad0b2426 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -18,7 +18,6 @@ import ( utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" ) // KubeApfDelegator implements k8s APF controller interface @@ -28,24 +27,26 @@ import ( type KubeApfDelegator struct { // scopingInformerFactory is not cluster scoped but can be made cluster scoped scopingSharedInformerFactory *scopingSharedInformerFactory - // scopingGenericSharedInformerFactory *kubequota.ScopingGenericSharedInformerFactory + // kubeCluster ClusterInterface can be used to get cluster scoped clientset kubeCluster kubernetes.ClusterInterface - // flowcontrolClient should be cluster scoped - flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface - // for now assume these are globl configurations for all 
logical clusters to inherit serverConcurrencyLimit int requestWaitLimit time.Duration - // delegates stores the references to cluster scoped apf filters + // delegates are the references to cluster scoped apf controllers delegates map[logicalcluster.Name]utilflowcontrol.Interface lock sync.RWMutex + + pathRecorderMux *mux.PathRecorderMux + + stopCh <-chan struct{} } // Make sure utilflowcontrol.Interface is implemented var _ utilflowcontrol.Interface = &KubeApfDelegator{} + var defaultCluster logicalcluster.Name = logicalcluster.Wildcard // NewKubeApfDelegator @@ -55,22 +56,13 @@ func NewKubeApfDelegator( serverConcurrencyLimit int, requestWaitLimit time.Duration, ) *KubeApfDelegator { - newFilter := &KubeApfDelegator{ + return &KubeApfDelegator{ scopingSharedInformerFactory: newScopingSharedInformerFactory(informerFactory), // not cluster scoped kubeCluster: kubeCluster, // can be made cluster scoped serverConcurrencyLimit: serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, } - - // start APF on the default Cluster - flowcontrolClient := kubeCluster.Cluster(defaultCluster).FlowcontrolV1beta2() - scopedSharedInformerFactory := newFilter.scopingSharedInformerFactory.ForCluster(defaultCluster) - - klog.V(3).InfoS("Creating new apf controller for cluster", "clusterName", defaultCluster) - newFilter.delegates[defaultCluster] = utilflowcontrol.New(scopedSharedInformerFactory, flowcontrolClient, serverConcurrencyLimit, requestWaitLimit) - - return newFilter } // Handle implements flowcontrol.Interface @@ -92,32 +84,35 @@ func (k *KubeApfDelegator) Handle(ctx context.Context, // GetInterestedWatchCount implements flowcontrol.Interface func (k *KubeApfDelegator) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { + // FIXME: Figure out the right way to implement WatchTracker return k.delegates[defaultCluster].GetInterestedWatchCount(requestInfo) } // RegisterWatch implements 
flowcontrol.Interface func (k *KubeApfDelegator) RegisterWatch(r *http.Request) utilflowcontrol.ForgetWatchFunc { + // FIXME: Figure out the right way to implement WatchTracker return k.delegates[defaultCluster].RegisterWatch(r) } // Install implements flowcontrol.Interface func (k *KubeApfDelegator) Install(c *mux.PathRecorderMux) { - k.delegates[defaultCluster].Install(c) + // k.pathRecorderMux = c // store the reference for Install later + // Do nothing for now } // MaintainObservations implements flowcontrol.Interface func (k *KubeApfDelegator) MaintainObservations(stopCh <-chan struct{}) { - k.delegates[defaultCluster].MaintainObservations(stopCh) + // TODO: make sure MaintainObservations implementation works with clusters + k.stopCh = stopCh } // Run implements flowcontrol.Interface -// it starts kube apf controller +// The delegator doesn't actually call apf controller Run here +// It stores the stopCh for later use when the cluster scoped +// apf controllers are created func (k *KubeApfDelegator) Run(stopCh <-chan struct{}) error { - // TODO: start ClusterWorkspaceDeletionMonitor - - // Run kube apf controller. Cluster specific apf controller will be created - // on demand - return k.delegates[defaultCluster].Run(stopCh) + k.stopCh = stopCh + return nil } // getOrCreateDelegate creates a utilflowcontrol.Interface (apf filter) for clusterName. @@ -138,32 +133,24 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) return delegate, nil } - // // Set up a context that is cancelable and that is bounded by k.serverDone - // ctx, cancel := context.WithCancel(context.Background()) - // go func() { - // // Wait for either the context or the server to be done. If it's the server, cancel the context. 
- // select { - // case <-ctx.Done(): - // case <-k.serverDone: - // cancel() - // } - // }() - - // TODO: new delegate should use cluster scoped informer factory and flowcontrol clients + // New delegate uses cluster scoped informer factory and flowcontrol clients scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(clusterName) flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() delegate = utilflowcontrol.New( + clusterName.String()+"-controller", scopedInformerFactory, flowcontrolClient, k.serverConcurrencyLimit, k.requestWaitLimit, ) + // TODO: call scopedInformerFactory.Start? k.delegates[clusterName] = delegate - // TODO: implement stop channel mechanism - stopCh := make(chan struct{}) // Start cluster scoped apf controller - go delegate.Run(stopCh) + go delegate.MaintainObservations(k.stopCh) + go delegate.Run(k.stopCh) + + // delegate.Install(k.pathRecorderMux) klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) return delegate, nil From d023acc5555457b164f7ad3e89d580a6b90b4807 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 15 Sep 2022 21:07:47 +0000 Subject: [PATCH 04/28] Use single delegatingEventHandler for apf --- pkg/reconciler/kubeapf/kube_apf.go | 2 ++ .../kubeapf/scoped_shared_informer_factory.go | 26 ++++++++++++------- .../kubeapf/single_cluster_flow_control.go | 11 ++++---- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index e67ad0b2426..802dfc3bf62 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -145,6 +145,8 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) ) // TODO: call scopedInformerFactory.Start? 
+ scopedInformerFactory.Start(k.stopCh) + k.delegates[clusterName] = delegate // Start cluster scoped apf controller go delegate.MaintainObservations(k.stopCh) diff --git a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go index e09cb9587cd..951c36f5721 100644 --- a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go +++ b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go @@ -2,8 +2,8 @@ package kubeapf import ( reflect "reflect" - "sync" + "github.com/kcp-dev/kcp/pkg/reconciler/common" "github.com/kcp-dev/logicalcluster/v2" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -33,27 +33,32 @@ import ( // scopingSharedInformerFactory is used to create scopedSharedInformerFactory that // targets a specific logical cluster type scopingSharedInformerFactory struct { - factory informers.SharedInformerFactory + factory informers.SharedInformerFactory + delegatingEventHandler *common.DelegatingEventHandler } func newScopingSharedInformerFactory(factory informers.SharedInformerFactory) *scopingSharedInformerFactory { - return &scopingSharedInformerFactory{factory: factory} + return &scopingSharedInformerFactory{ + factory: factory, + delegatingEventHandler: common.NewDelegatingEventHandler(), + } } // ForCluster returns a scopedSharedInformerFactory that works on a specific logical cluster func (f *scopingSharedInformerFactory) ForCluster(clusterName logicalcluster.Name) *scopedSharedInformerFactory { return &scopedSharedInformerFactory{ - delegate: f.factory, - clusterName: clusterName, + delegate: f.factory, + clusterName: clusterName, + delegatingEventHandler: f.delegatingEventHandler, } } // scopedSharedInformerFactory can create informers (currently limited to Flowcontrol) // that are for a single cluster type scopedSharedInformerFactory struct { - lock sync.RWMutex - delegate informers.SharedInformerFactory - clusterName logicalcluster.Name + 
delegate informers.SharedInformerFactory + clusterName logicalcluster.Name + delegatingEventHandler *common.DelegatingEventHandler } var _ informers.SharedInformerFactory = &scopedSharedInformerFactory{} @@ -137,8 +142,9 @@ func (*scopedSharedInformerFactory) Extensions() extensions.Interface { // that is targeting a single cluster func (f *scopedSharedInformerFactory) Flowcontrol() flowcontrol.Interface { return &scopedFlowcontrol{ - clusterName: f.clusterName, - delegate: f.delegate.Flowcontrol(), + clusterName: f.clusterName, + delegate: f.delegate.Flowcontrol(), + delegatingEventHandler: f.delegatingEventHandler, } } diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go index bf329021321..1ad111d0e45 100644 --- a/pkg/reconciler/kubeapf/single_cluster_flow_control.go +++ b/pkg/reconciler/kubeapf/single_cluster_flow_control.go @@ -17,8 +17,9 @@ import ( ) type scopedFlowcontrol struct { - clusterName logicalcluster.Name - delegate flowcontrol.Interface + clusterName logicalcluster.Name + delegate flowcontrol.Interface + delegatingEventHandler *common.DelegatingEventHandler } var _ flowcontrol.Interface = &scopedFlowcontrol{} @@ -38,7 +39,7 @@ func (f *scopedFlowcontrol) V1beta2() v1beta2.Interface { return &scopedFlowcontrolV1Beta2{ clusterName: f.clusterName, delegate: f.delegate.V1beta2(), - delegatingEventHandler: common.NewDelegatingEventHandler(), + delegatingEventHandler: f.delegatingEventHandler, } } @@ -87,7 +88,7 @@ func (s *scopedFlowSchemaInformer) Informer() cache.SharedIndexInformer { return &common.DelegatingInformer{ ClusterName: s.clusterName, Resource: apiflowcontrolv1beta2.Resource("flowschema"), - SharedIndexInformer: s.delegate.Informer(), + SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. 
single instance will be shared DelegatingEventHandler: s.delegatingEventHandler, } } @@ -105,7 +106,7 @@ func (s *scopedPriorityLevelConfigurationInformer) Informer() cache.SharedIndexI return &common.DelegatingInformer{ ClusterName: s.clusterName, Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfiguration"), - SharedIndexInformer: s.delegate.Informer(), + SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. single instance will be shared DelegatingEventHandler: s.delegatingEventHandler, } } From 3f10c8182c4f8af5e533e264e6ec698d734d062a Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Fri, 16 Sep 2022 18:53:28 +0000 Subject: [PATCH 05/28] Use single watch tracker for now --- pkg/reconciler/kubeapf/kube_apf.go | 31 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index 802dfc3bf62..c2818e6947e 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -2,7 +2,6 @@ package kubeapf import ( "context" - "net/http" "sync" "time" @@ -12,7 +11,6 @@ import ( "k8s.io/klog/v2" flowcontrol "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apiserver/pkg/endpoints/request" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/server/mux" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" @@ -42,12 +40,14 @@ type KubeApfDelegator struct { pathRecorderMux *mux.PathRecorderMux stopCh <-chan struct{} + + utilflowcontrol.WatchTracker } // Make sure utilflowcontrol.Interface is implemented var _ utilflowcontrol.Interface = &KubeApfDelegator{} -var defaultCluster logicalcluster.Name = logicalcluster.Wildcard +// var defaultCluster logicalcluster.Name = logicalcluster.Wildcard // NewKubeApfDelegator func NewKubeApfDelegator( @@ -62,6 +62,7 @@ func NewKubeApfDelegator( serverConcurrencyLimit: 
serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, + WatchTracker: utilflowcontrol.NewWatchTracker(), } } @@ -82,17 +83,17 @@ func (k *KubeApfDelegator) Handle(ctx context.Context, delegate.Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) } -// GetInterestedWatchCount implements flowcontrol.Interface -func (k *KubeApfDelegator) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { - // FIXME: Figure out the right way to implement WatchTracker - return k.delegates[defaultCluster].GetInterestedWatchCount(requestInfo) -} +// // GetInterestedWatchCount implements flowcontrol.Interface +// func (k *KubeApfDelegator) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { +// // FIXME: Figure out the right way to implement WatchTracker +// return k.delegates[defaultCluster].GetInterestedWatchCount(requestInfo) +// } -// RegisterWatch implements flowcontrol.Interface -func (k *KubeApfDelegator) RegisterWatch(r *http.Request) utilflowcontrol.ForgetWatchFunc { - // FIXME: Figure out the right way to implement WatchTracker - return k.delegates[defaultCluster].RegisterWatch(r) -} +// // RegisterWatch implements flowcontrol.Interface +// func (k *KubeApfDelegator) RegisterWatch(r *http.Request) utilflowcontrol.ForgetWatchFunc { +// // FIXME: Figure out the right way to implement WatchTracker +// return k.delegates[defaultCluster].RegisterWatch(r) +// } // Install implements flowcontrol.Interface func (k *KubeApfDelegator) Install(c *mux.PathRecorderMux) { @@ -144,14 +145,14 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) k.requestWaitLimit, ) - // TODO: call scopedInformerFactory.Start? 
scopedInformerFactory.Start(k.stopCh) k.delegates[clusterName] = delegate // Start cluster scoped apf controller - go delegate.MaintainObservations(k.stopCh) + go delegate.MaintainObservations(k.stopCh) // FIXME: Metric observations need to work per-cluster go delegate.Run(k.stopCh) + // TODO: need to install per-cluster debug endpoint // delegate.Install(k.pathRecorderMux) klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) From 106091c529e90d75d7b0367acfc67e3eb00908d6 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Tue, 27 Sep 2022 20:12:55 +0000 Subject: [PATCH 06/28] Use plural for resource names --- pkg/reconciler/kubeapf/single_cluster_flow_control.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go index 1ad111d0e45..cddfb8cb649 100644 --- a/pkg/reconciler/kubeapf/single_cluster_flow_control.go +++ b/pkg/reconciler/kubeapf/single_cluster_flow_control.go @@ -87,7 +87,7 @@ var _ v1beta2.PriorityLevelConfigurationInformer = &scopedPriorityLevelConfigura func (s *scopedFlowSchemaInformer) Informer() cache.SharedIndexInformer { return &common.DelegatingInformer{ ClusterName: s.clusterName, - Resource: apiflowcontrolv1beta2.Resource("flowschema"), + Resource: apiflowcontrolv1beta2.Resource("flowschemas"), SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. 
single instance will be shared DelegatingEventHandler: s.delegatingEventHandler, } @@ -105,7 +105,7 @@ func (s *scopedFlowSchemaInformer) Lister() flowcontrollisterv1beta2.FlowSchemaL func (s *scopedPriorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { return &common.DelegatingInformer{ ClusterName: s.clusterName, - Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfiguration"), + Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfigurations"), SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. single instance will be shared DelegatingEventHandler: s.delegatingEventHandler, } From 64a1bf2805a93e1826a672dc87be62d2e7f1f566 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 13 Oct 2022 20:09:39 +0000 Subject: [PATCH 07/28] commit storage object count tracker controller and util code --- pkg/reconciler/soct/controller.go | 213 ++++++++++++++++++ .../util/delegating_event_handler.go | 123 ++++++++++ pkg/reconciler/util/delegating_informer.go | 28 +++ .../util/scoped_generic_informer.go | 149 ++++++++++++ pkg/reconciler/util/util.go | 25 ++ 5 files changed, 538 insertions(+) create mode 100644 pkg/reconciler/soct/controller.go create mode 100644 pkg/reconciler/util/delegating_event_handler.go create mode 100644 pkg/reconciler/util/delegating_informer.go create mode 100644 pkg/reconciler/util/scoped_generic_informer.go create mode 100644 pkg/reconciler/util/util.go diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go new file mode 100644 index 00000000000..5e0bc6b8ba4 --- /dev/null +++ b/pkg/reconciler/soct/controller.go @@ -0,0 +1,213 @@ +package soct + +import ( + "context" + "fmt" + "time" + + apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" + tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" + tenancyinformers 
"github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1"
+	"github.com/kcp-dev/kcp/pkg/logging"
+	"github.com/kcp-dev/logicalcluster/v2"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/clusters"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+const SOCTControllerName = "kcp-storage-object-count-tracker-controller"
+
+// SOCTController monitors ClusterWorkspaces, APIBindings and CRDs and creates or deletes
+// cluster-scoped storage object count observer goroutines and trackers accordingly.
+// It contains a registry for Getter funcs of all resource types that will be used by
+// observer goroutines to update the tracker object counts. It also acts as a multiplexer
+// for API request-driven queries to retrieve the latest resource object count numbers. 
+type SOCTController struct { + cwQueue workqueue.RateLimitingInterface + crdQueue workqueue.RateLimitingInterface + getterRegistry flowcontrolrequest.StorageObjectCountGetterRegistry + tracker flowcontrolrequest.KcpStorageObjectCountTracker + + getClusterWorkspace func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) + getCRD func(key string) (*apiextensionsv1.CustomResourceDefinition, error) + // listCRDs func() ([]*apiextensionsv1.CustomResourceDefinition, error) +} + +// NewSOCTController +func NewSOCTController( + clusterWorkspacesInformer tenancyinformers.ClusterWorkspaceInformer, + crdInformer apiextensionsinformers.CustomResourceDefinitionInformer, + getterRegistry flowcontrolrequest.StorageObjectCountGetterRegistry, + tracker flowcontrolrequest.KcpStorageObjectCountTracker, +) *SOCTController { + c := &SOCTController{ + cwQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), SOCTControllerName), + crdQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), SOCTControllerName), + getterRegistry: getterRegistry, + tracker: tracker, + getClusterWorkspace: func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) { + return clusterWorkspacesInformer.Lister().Get(key) + }, + getCRD: func(key string) (*apiextensionsv1.CustomResourceDefinition, error) { + return crdInformer.Lister().Get(key) + }, + // listCRDs: func() ([]*apiextensionsv1.CustomResourceDefinition, error) { + // return crdInformer.Lister().List(labels.Everything()) + // }, + } + + clusterWorkspacesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueClusterWorkspace, + DeleteFunc: c.enqueueClusterWorkspace, + }) + + crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueCrd, + DeleteFunc: c.enqueueCrd, + }) + return c +} + +func (c *SOCTController) enqueueClusterWorkspace(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err 
!= nil { + runtime.HandleError(err) + return + } + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), SOCTControllerName), key) + logger.V(2).Info("queueing ClusterWorkspace") + c.cwQueue.Add(key) +} + +func (c *SOCTController) enqueueCrd(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + runtime.HandleError(err) + return + } + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), SOCTControllerName), key) + logger.V(2).Info("queueing CRD") + c.crdQueue.Add(key) +} + +// Run starts the SOCT controller. It needs to start updating +// counters in trackers for APF to work +func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { + defer runtime.HandleCrash() + defer c.cwQueue.ShutDown() + defer c.crdQueue.ShutDown() + + klog.Infof("Starting %s controller", SOCTControllerName) + defer klog.Infof("Shutting down %s controller", SOCTControllerName) + + go wait.Until(func() { c.startClusterWorkspaceWorker(ctx) }, time.Second, stop) + go wait.Until(func() { c.startCrdWorker(ctx) }, time.Second, stop) + + <-stop +} + +func (c *SOCTController) startClusterWorkspaceWorker(ctx context.Context) { + for c.processNext(ctx, c.cwQueue, c.processClusterWorkspace) { + } +} + +func (c *SOCTController) startCrdWorker(ctx context.Context) { + for c.processNext(ctx, c.crdQueue, c.processCrd) { + } +} + +func (c *SOCTController) processNext( + ctx context.Context, + queue workqueue.RateLimitingInterface, + processFunc func(ctx context.Context, key string) error, +) bool { + // Wait until there is a new item in the working queue + k, quit := queue.Get() + if quit { + return false + } + key := k.(string) + // No matter what, tell the queue we're done with this key, to unblock + // other workers. 
+	defer queue.Done(key)
+
+	if err := processFunc(ctx, key); err != nil {
+		runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", SOCTControllerName, key, err))
+		queue.AddRateLimited(key)
+		return true
+	}
+	queue.Forget(key)
+	return true
+}
+
+// getBoundCluster returns the logical cluster recorded in the CRD's schema-cluster
+// annotation, or "" if the annotation is absent or crd is nil.
+func getBoundCluster(crd *apiextensionsv1.CustomResourceDefinition) string {
+	if crd == nil {
+		return ""
+	}
+	cluster, ok := crd.GetAnnotations()[apisv1alpha1.AnnotationSchemaClusterKey]
+	if !ok {
+		return ""
+	}
+	return cluster
+}
+
+// getResource returns the CRD's plural resource name, or "" if crd is nil.
+func getResource(crd *apiextensionsv1.CustomResourceDefinition) string {
+	if crd == nil {
+		return ""
+	}
+	return crd.Spec.Names.Plural
+}
+
+// processCrd starts an object-count observer for a newly seen CRD and stops it
+// when the CRD is gone. The error check must precede any dereference of crd:
+// on a NotFound error the lister returns a nil CRD.
+func (c *SOCTController) processCrd(ctx context.Context, key string) error {
+	logger := klog.FromContext(ctx)
+
+	crd, err := c.getCRD(key)
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			// crd is nil here, so the nil-guarded helpers yield "".
+			// TODO(review): plumb the deleted CRD's cluster/resource through
+			// (e.g. via the delete event) so the right observer is stopped.
+			logger.V(2).Info("CRD not found - stopping observer for it (if needed)")
+			c.tracker.StopObserving(getBoundCluster(crd), getResource(crd))
+			return nil
+		}
+		return err
+	}
+	cluster := getBoundCluster(crd)
+	resource := getResource(crd)
+
+	logger = logging.WithObject(logger, crd)
+	logger.V(2).Info("New CRD - starting observer for it (if needed)")
+	c.tracker.StartObserving(cluster, resource,
+		func() int64 {
+			return c.getterRegistry.GetObjectCount(logicalcluster.New(cluster), resource)
+		},
+	)
+	return nil
+}
+
+func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string) error {
+	logger := klog.FromContext(ctx)
+	// e.g. 
root:orgws + parent, name := clusters.SplitClusterAwareKey(key) + + // turn it into root:org:ws + clusterName := parent.Join(name) + clusterNameStr := clusterName.String() + ws, err := c.getClusterWorkspace(key) + logger = logger.WithValues("logicalCluster", clusterNameStr) + logger.V(2).Info("processClusterWorkspace called") + if err != nil { + if kerrors.IsNotFound(err) { + logger.V(2).Info("ClusterWorkspace not found - deleting tracker") + c.tracker.DeleteTracker(clusterNameStr) + return nil + } + return err + } + logger = logging.WithObject(logger, ws) + c.tracker.CreateTracker(clusterNameStr) + logger.V(2).Info("Cluster tracker started") + // TODO: start observer goroutines + // for all api-resources in the logical cluster + + return nil +} diff --git a/pkg/reconciler/util/delegating_event_handler.go b/pkg/reconciler/util/delegating_event_handler.go new file mode 100644 index 00000000000..51b356440f0 --- /dev/null +++ b/pkg/reconciler/util/delegating_event_handler.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sync" + "time" + + "github.com/kcp-dev/logicalcluster/v2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// DelegatingEventHandler multiplexes event handlers for multiple resource types and logical clusters. 
+type DelegatingEventHandler struct { + lock sync.RWMutex + eventHandlers map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler +} + +// NewDelegatingEventHandler returns a new delegatingEventHandler. +func NewDelegatingEventHandler() *DelegatingEventHandler { + return &DelegatingEventHandler{ + eventHandlers: map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler{}, + } +} + +// registerEventHandler registers an event handler, h, to receive events for the given resource/informer scoped only to +// clusterName. +func (d *DelegatingEventHandler) registerEventHandler( + resource schema.GroupResource, + informer cache.SharedIndexInformer, + clusterName logicalcluster.Name, + h cache.ResourceEventHandler, +) { + d.lock.Lock() + defer d.lock.Unlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} + d.eventHandlers[resource] = groupResourceHandlers + + informer.AddEventHandler(d.resourceEventHandlerFuncs(resource)) + } + + groupResourceHandlers[clusterName] = h +} + +// registerEventHandlerWithResyncPeriod registers an event handler, h, to receive events for the given resource/informer +// scoped only to clusterName, with the given resync period. 
+func (d *DelegatingEventHandler) registerEventHandlerWithResyncPeriod( + resource schema.GroupResource, + informer cache.SharedIndexInformer, + clusterName logicalcluster.Name, + h cache.ResourceEventHandler, + resyncPeriod time.Duration, +) { + d.lock.Lock() + defer d.lock.Unlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} + d.eventHandlers[resource] = groupResourceHandlers + + informer.AddEventHandlerWithResyncPeriod(d.resourceEventHandlerFuncs(resource), resyncPeriod) + } + + groupResourceHandlers[clusterName] = h +} + +func (d *DelegatingEventHandler) resourceEventHandlerFuncs(resource schema.GroupResource) cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if h := d.getEventHandler(resource, obj); h != nil { + h.OnAdd(obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + if h := d.getEventHandler(resource, oldObj); h != nil { + h.OnUpdate(oldObj, newObj) + } + }, + DeleteFunc: func(obj interface{}) { + if h := d.getEventHandler(resource, obj); h != nil { + h.OnDelete(obj) + } + }, + } +} + +// getEventHandler returns a cache.ResourceEventHandler for resource and the logicalcluster.Name for obj. 
+func (d *DelegatingEventHandler) getEventHandler(resource schema.GroupResource, obj interface{}) cache.ResourceEventHandler { + clusterName := ClusterNameForObj(obj) + if clusterName.Empty() { + return nil + } + + d.lock.RLock() + defer d.lock.RUnlock() + + groupResourceHandlers, ok := d.eventHandlers[resource] + if !ok { + return nil + } + + return groupResourceHandlers[clusterName] +} diff --git a/pkg/reconciler/util/delegating_informer.go b/pkg/reconciler/util/delegating_informer.go new file mode 100644 index 00000000000..84dae2d5b15 --- /dev/null +++ b/pkg/reconciler/util/delegating_informer.go @@ -0,0 +1,28 @@ +package util + +import ( + "time" + + "github.com/kcp-dev/logicalcluster/v2" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// DelegatingInformer embeds a cache.SharedIndexInformer, delegating adding event handlers to +// registerEventHandlerForCluster. +type DelegatingInformer struct { + ClusterName logicalcluster.Name + Resource schema.GroupResource + cache.SharedIndexInformer + DelegatingEventHandler *DelegatingEventHandler +} + +// AddEventHandler registers with the delegating event handler. +func (d *DelegatingInformer) AddEventHandler(handler cache.ResourceEventHandler) { + d.DelegatingEventHandler.registerEventHandler(d.Resource, d.SharedIndexInformer, d.ClusterName, handler) +} + +// AddEventHandlerWithResyncPeriod registers with the delegating event handler with the given resync period. 
+func (d *DelegatingInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + d.DelegatingEventHandler.registerEventHandlerWithResyncPeriod(d.Resource, d.SharedIndexInformer, d.ClusterName, handler, resyncPeriod) +} diff --git a/pkg/reconciler/util/scoped_generic_informer.go b/pkg/reconciler/util/scoped_generic_informer.go new file mode 100644 index 00000000000..127cbb70d98 --- /dev/null +++ b/pkg/reconciler/util/scoped_generic_informer.go @@ -0,0 +1,149 @@ +package util + +import ( + "github.com/kcp-dev/kcp/pkg/indexers" + "github.com/kcp-dev/logicalcluster/v2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" +) + +// scopedGenericInformer wraps an informers.GenericInformer and produces instances of cache.GenericLister that are +// scoped to a single logical cluster. +type scopedGenericInformer struct { + delegate informers.GenericInformer + clusterName logicalcluster.Name + resource schema.GroupResource + delegatingEventHandler *DelegatingEventHandler +} + +// Informer invokes Informer() on the underlying informers.GenericInformer. +func (s *scopedGenericInformer) Informer() cache.SharedIndexInformer { + return &DelegatingInformer{ + ClusterName: s.clusterName, + SharedIndexInformer: s.delegate.Informer(), + DelegatingEventHandler: s.delegatingEventHandler, + } +} + +// Lister returns an implementation of cache.GenericLister that is scoped to a single logical cluster. 
+func (s *scopedGenericInformer) Lister() cache.GenericLister { + return &scopedGenericLister{ + indexer: s.delegate.Informer().GetIndexer(), + clusterName: s.clusterName, + resource: s.resource, + } +} + +// scopedGenericLister wraps a cache.Indexer to implement a cache.GenericLister that is scoped to a single logical +// cluster. +type scopedGenericLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name + resource schema.GroupResource +} + +// List returns all instances from the cache.Indexer scoped to a single logical cluster and matching selector. +func (s *scopedGenericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + ret = append(ret, obj.(runtime.Object)) + }) + return ret, err +} + +// ByNamespace returns an implementation of cache.GenericNamespaceLister that is scoped to a single logical cluster. +func (s *scopedGenericLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &scopedGenericNamespaceLister{ + indexer: s.indexer, + clusterName: s.clusterName, + namespace: namespace, + resource: s.resource, + } +} + +// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster. 
+func (s *scopedGenericLister) Get(name string) (runtime.Object, error) { + key := clusters.ToClusterAwareKey(s.clusterName, name) + obj, exists, err := s.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +func ListByIndex(indexer cache.Indexer, indexName, indexValue string, selector labels.Selector, appendFunc func(obj interface{})) error { + selectAll := selector == nil || selector.Empty() + + list, err := indexer.ByIndex(indexName, indexValue) + if err != nil { + return err + } + + for i := range list { + if selectAll { + appendFunc(list[i]) + continue + } + + metadata, err := meta.Accessor(list[i]) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFunc(list[i]) + } + } + + return nil +} + +// scopedGenericNamespaceLister wraps a cache.Indexer to implement a cache.GenericNamespaceLister that is scoped to a +// single logical cluster. +type scopedGenericNamespaceLister struct { + indexer cache.Indexer + clusterName logicalcluster.Name + namespace string + resource schema.GroupResource +} + +// List lists all instances from the cache.Indexer scoped to a single logical cluster and namespace, and matching +// selector. +func (s *scopedGenericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + // To support e.g. quota for cluster-scoped resources, we've hacked the k8s quota to use namespace="" when + // checking quota for cluster-scoped resources. But because all the upstream quota code is written to only + // support namespace-scoped resources, we have to hack the "namespace lister" to support returning all items + // when its namespace is "". 
+ var indexName, indexValue string + if s.namespace == "" { + indexName = indexers.ByLogicalCluster + indexValue = s.clusterName.String() + } else { + indexName = indexers.ByLogicalClusterAndNamespace + indexValue = clusters.ToClusterAwareKey(s.clusterName, s.namespace) + } + err = ListByIndex(s.indexer, indexName, indexValue, selector, func(obj interface{}) { + ret = append(ret, obj.(runtime.Object)) + }) + return ret, err +} + +// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster and +// namespace. +func (s *scopedGenericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/pkg/reconciler/util/util.go b/pkg/reconciler/util/util.go new file mode 100644 index 00000000000..46e831b54a2 --- /dev/null +++ b/pkg/reconciler/util/util.go @@ -0,0 +1,25 @@ +package util + +import ( + "github.com/kcp-dev/logicalcluster/v2" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" +) + +func ClusterNameForObj(obj interface{}) logicalcluster.Name { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return logicalcluster.Name{} + } + + _, clusterAndName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + utilruntime.HandleError(err) + return logicalcluster.Name{} + } + + cluster, _ := clusters.SplitClusterAwareKey(clusterAndName) + return cluster +} From 7d4108c049a42a9e076190e0512b958ea221e5ed Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 13 Oct 2022 20:32:29 +0000 Subject: [PATCH 08/28] adding logs for debug --- pkg/reconciler/soct/controller.go | 9 +++------ 1 file changed, 3 insertions(+), 6 
deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 5e0bc6b8ba4..e556553339d 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -37,7 +37,6 @@ type SOCTController struct { getClusterWorkspace func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) getCRD func(key string) (*apiextensionsv1.CustomResourceDefinition, error) - // listCRDs func() ([]*apiextensionsv1.CustomResourceDefinition, error) } // NewSOCTController @@ -48,8 +47,9 @@ func NewSOCTController( tracker flowcontrolrequest.KcpStorageObjectCountTracker, ) *SOCTController { c := &SOCTController{ - cwQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), SOCTControllerName), - crdQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), SOCTControllerName), + cwQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + crdQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + getterRegistry: getterRegistry, tracker: tracker, getClusterWorkspace: func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) { @@ -58,9 +58,6 @@ func NewSOCTController( getCRD: func(key string) (*apiextensionsv1.CustomResourceDefinition, error) { return crdInformer.Lister().Get(key) }, - // listCRDs: func() ([]*apiextensionsv1.CustomResourceDefinition, error) { - // return crdInformer.Lister().List(labels.Everything()) - // }, } clusterWorkspacesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ From cc8f90254aaee652ec085742558bbf530d8c04b5 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Tue, 18 Oct 2022 00:48:42 +0000 Subject: [PATCH 09/28] rename run functions --- pkg/reconciler/soct/controller.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 
e556553339d..33ce766ed27 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -104,18 +104,18 @@ func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { klog.Infof("Starting %s controller", SOCTControllerName) defer klog.Infof("Shutting down %s controller", SOCTControllerName) - go wait.Until(func() { c.startClusterWorkspaceWorker(ctx) }, time.Second, stop) - go wait.Until(func() { c.startCrdWorker(ctx) }, time.Second, stop) + go wait.Until(func() { c.runClusterWorkspaceWorker(ctx) }, time.Second, stop) + go wait.Until(func() { c.runCrdWorker(ctx) }, time.Second, stop) <-stop } -func (c *SOCTController) startClusterWorkspaceWorker(ctx context.Context) { +func (c *SOCTController) runClusterWorkspaceWorker(ctx context.Context) { for c.processNext(ctx, c.cwQueue, c.processClusterWorkspace) { } } -func (c *SOCTController) startCrdWorker(ctx context.Context) { +func (c *SOCTController) runCrdWorker(ctx context.Context) { for c.processNext(ctx, c.crdQueue, c.processCrd) { } } @@ -131,6 +131,11 @@ func (c *SOCTController) processNext( return false } key := k.(string) + + logger := logging.WithQueueKey(klog.FromContext(ctx), key) + ctx = klog.NewContext(ctx, logger) + logger.V(1).Info("soct processing key") + // No matter what, tell the queue we're done with this key, to unblock // other workers. 
defer queue.Done(key) From 8131fc9a27a2046191876e870ba2c80e9c34a1f6 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Tue, 18 Oct 2022 00:50:33 +0000 Subject: [PATCH 10/28] rename common to util --- .../common/delegating_event_handler.go | 123 --------------- pkg/reconciler/common/delegating_informer.go | 28 ---- .../common/scoped_generic_informer.go | 149 ------------------ pkg/reconciler/common/util.go | 25 --- pkg/reconciler/kubeapf/kube_apf.go | 5 +- .../kubeapf/scoped_shared_informer_factory.go | 8 +- .../kubeapf/single_cluster_flow_control.go | 18 +-- 7 files changed, 15 insertions(+), 341 deletions(-) delete mode 100644 pkg/reconciler/common/delegating_event_handler.go delete mode 100644 pkg/reconciler/common/delegating_informer.go delete mode 100644 pkg/reconciler/common/scoped_generic_informer.go delete mode 100644 pkg/reconciler/common/util.go diff --git a/pkg/reconciler/common/delegating_event_handler.go b/pkg/reconciler/common/delegating_event_handler.go deleted file mode 100644 index fb2b4b39f92..00000000000 --- a/pkg/reconciler/common/delegating_event_handler.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package common - -import ( - "sync" - "time" - - "github.com/kcp-dev/logicalcluster/v2" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -// DelegatingEventHandler multiplexes event handlers for multiple resource types and logical clusters. -type DelegatingEventHandler struct { - lock sync.RWMutex - eventHandlers map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler -} - -// NewDelegatingEventHandler returns a new delegatingEventHandler. -func NewDelegatingEventHandler() *DelegatingEventHandler { - return &DelegatingEventHandler{ - eventHandlers: map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler{}, - } -} - -// registerEventHandler registers an event handler, h, to receive events for the given resource/informer scoped only to -// clusterName. -func (d *DelegatingEventHandler) registerEventHandler( - resource schema.GroupResource, - informer cache.SharedIndexInformer, - clusterName logicalcluster.Name, - h cache.ResourceEventHandler, -) { - d.lock.Lock() - defer d.lock.Unlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} - d.eventHandlers[resource] = groupResourceHandlers - - informer.AddEventHandler(d.resourceEventHandlerFuncs(resource)) - } - - groupResourceHandlers[clusterName] = h -} - -// registerEventHandlerWithResyncPeriod registers an event handler, h, to receive events for the given resource/informer -// scoped only to clusterName, with the given resync period. 
-func (d *DelegatingEventHandler) registerEventHandlerWithResyncPeriod( - resource schema.GroupResource, - informer cache.SharedIndexInformer, - clusterName logicalcluster.Name, - h cache.ResourceEventHandler, - resyncPeriod time.Duration, -) { - d.lock.Lock() - defer d.lock.Unlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} - d.eventHandlers[resource] = groupResourceHandlers - - informer.AddEventHandlerWithResyncPeriod(d.resourceEventHandlerFuncs(resource), resyncPeriod) - } - - groupResourceHandlers[clusterName] = h -} - -func (d *DelegatingEventHandler) resourceEventHandlerFuncs(resource schema.GroupResource) cache.ResourceEventHandlerFuncs { - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - if h := d.getEventHandler(resource, obj); h != nil { - h.OnAdd(obj) - } - }, - UpdateFunc: func(oldObj, newObj interface{}) { - if h := d.getEventHandler(resource, oldObj); h != nil { - h.OnUpdate(oldObj, newObj) - } - }, - DeleteFunc: func(obj interface{}) { - if h := d.getEventHandler(resource, obj); h != nil { - h.OnDelete(obj) - } - }, - } -} - -// getEventHandler returns a cache.ResourceEventHandler for resource and the logicalcluster.Name for obj. 
-func (d *DelegatingEventHandler) getEventHandler(resource schema.GroupResource, obj interface{}) cache.ResourceEventHandler { - clusterName := ClusterNameForObj(obj) - if clusterName.Empty() { - return nil - } - - d.lock.RLock() - defer d.lock.RUnlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - return nil - } - - return groupResourceHandlers[clusterName] -} diff --git a/pkg/reconciler/common/delegating_informer.go b/pkg/reconciler/common/delegating_informer.go deleted file mode 100644 index 3875b45f8d2..00000000000 --- a/pkg/reconciler/common/delegating_informer.go +++ /dev/null @@ -1,28 +0,0 @@ -package common - -import ( - "time" - - "github.com/kcp-dev/logicalcluster/v2" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -// DelegatingInformer embeds a cache.SharedIndexInformer, delegating adding event handlers to -// registerEventHandlerForCluster. -type DelegatingInformer struct { - ClusterName logicalcluster.Name - Resource schema.GroupResource - cache.SharedIndexInformer - DelegatingEventHandler *DelegatingEventHandler -} - -// AddEventHandler registers with the delegating event handler. -func (d *DelegatingInformer) AddEventHandler(handler cache.ResourceEventHandler) { - d.DelegatingEventHandler.registerEventHandler(d.Resource, d.SharedIndexInformer, d.ClusterName, handler) -} - -// AddEventHandlerWithResyncPeriod registers with the delegating event handler with the given resync period. 
-func (d *DelegatingInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { - d.DelegatingEventHandler.registerEventHandlerWithResyncPeriod(d.Resource, d.SharedIndexInformer, d.ClusterName, handler, resyncPeriod) -} diff --git a/pkg/reconciler/common/scoped_generic_informer.go b/pkg/reconciler/common/scoped_generic_informer.go deleted file mode 100644 index 7588d7bf0e8..00000000000 --- a/pkg/reconciler/common/scoped_generic_informer.go +++ /dev/null @@ -1,149 +0,0 @@ -package common - -import ( - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/logicalcluster/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" -) - -// scopedGenericInformer wraps an informers.GenericInformer and produces instances of cache.GenericLister that are -// scoped to a single logical cluster. -type scopedGenericInformer struct { - delegate informers.GenericInformer - clusterName logicalcluster.Name - resource schema.GroupResource - delegatingEventHandler *DelegatingEventHandler -} - -// Informer invokes Informer() on the underlying informers.GenericInformer. -func (s *scopedGenericInformer) Informer() cache.SharedIndexInformer { - return &DelegatingInformer{ - ClusterName: s.clusterName, - SharedIndexInformer: s.delegate.Informer(), - DelegatingEventHandler: s.delegatingEventHandler, - } -} - -// Lister returns an implementation of cache.GenericLister that is scoped to a single logical cluster. 
-func (s *scopedGenericInformer) Lister() cache.GenericLister { - return &scopedGenericLister{ - indexer: s.delegate.Informer().GetIndexer(), - clusterName: s.clusterName, - resource: s.resource, - } -} - -// scopedGenericLister wraps a cache.Indexer to implement a cache.GenericLister that is scoped to a single logical -// cluster. -type scopedGenericLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name - resource schema.GroupResource -} - -// List returns all instances from the cache.Indexer scoped to a single logical cluster and matching selector. -func (s *scopedGenericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - err = ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { - ret = append(ret, obj.(runtime.Object)) - }) - return ret, err -} - -// ByNamespace returns an implementation of cache.GenericNamespaceLister that is scoped to a single logical cluster. -func (s *scopedGenericLister) ByNamespace(namespace string) cache.GenericNamespaceLister { - return &scopedGenericNamespaceLister{ - indexer: s.indexer, - clusterName: s.clusterName, - namespace: namespace, - resource: s.resource, - } -} - -// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster. 
-func (s *scopedGenericLister) Get(name string) (runtime.Object, error) { - key := clusters.ToClusterAwareKey(s.clusterName, name) - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} - -func ListByIndex(indexer cache.Indexer, indexName, indexValue string, selector labels.Selector, appendFunc func(obj interface{})) error { - selectAll := selector == nil || selector.Empty() - - list, err := indexer.ByIndex(indexName, indexValue) - if err != nil { - return err - } - - for i := range list { - if selectAll { - appendFunc(list[i]) - continue - } - - metadata, err := meta.Accessor(list[i]) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFunc(list[i]) - } - } - - return nil -} - -// scopedGenericNamespaceLister wraps a cache.Indexer to implement a cache.GenericNamespaceLister that is scoped to a -// single logical cluster. -type scopedGenericNamespaceLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name - namespace string - resource schema.GroupResource -} - -// List lists all instances from the cache.Indexer scoped to a single logical cluster and namespace, and matching -// selector. -func (s *scopedGenericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - // To support e.g. quota for cluster-scoped resources, we've hacked the k8s quota to use namespace="" when - // checking quota for cluster-scoped resources. But because all the upstream quota code is written to only - // support namespace-scoped resources, we have to hack the "namespace lister" to support returning all items - // when its namespace is "". 
- var indexName, indexValue string - if s.namespace == "" { - indexName = indexers.ByLogicalCluster - indexValue = s.clusterName.String() - } else { - indexName = indexers.ByLogicalClusterAndNamespace - indexValue = clusters.ToClusterAwareKey(s.clusterName, s.namespace) - } - err = ListByIndex(s.indexer, indexName, indexValue, selector, func(obj interface{}) { - ret = append(ret, obj.(runtime.Object)) - }) - return ret, err -} - -// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster and -// namespace. -func (s *scopedGenericNamespaceLister) Get(name string) (runtime.Object, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} diff --git a/pkg/reconciler/common/util.go b/pkg/reconciler/common/util.go deleted file mode 100644 index c0dd3defd51..00000000000 --- a/pkg/reconciler/common/util.go +++ /dev/null @@ -1,25 +0,0 @@ -package common - -import ( - "github.com/kcp-dev/logicalcluster/v2" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" -) - -func ClusterNameForObj(obj interface{}) logicalcluster.Name { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return logicalcluster.Name{} - } - - _, clusterAndName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - utilruntime.HandleError(err) - return logicalcluster.Name{} - } - - cluster, _ := clusters.SplitClusterAwareKey(clusterAndName) - return cluster -} diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index c2818e6947e..500740d6055 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -138,7 +138,6 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName 
logicalcluster.Name) scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(clusterName) flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() delegate = utilflowcontrol.New( - clusterName.String()+"-controller", scopedInformerFactory, flowcontrolClient, k.serverConcurrencyLimit, @@ -149,11 +148,11 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) k.delegates[clusterName] = delegate // Start cluster scoped apf controller - go delegate.MaintainObservations(k.stopCh) // FIXME: Metric observations need to work per-cluster + go delegate.MaintainObservations(k.stopCh) // FIXME: Metric observations need to work per-cluster --> beware of metrics explosion go delegate.Run(k.stopCh) // TODO: need to install per-cluster debug endpoint - // delegate.Install(k.pathRecorderMux) + delegate.Install(k.pathRecorderMux) klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) return delegate, nil diff --git a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go index 951c36f5721..8bb02f0870b 100644 --- a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go +++ b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go @@ -3,7 +3,7 @@ package kubeapf import ( reflect "reflect" - "github.com/kcp-dev/kcp/pkg/reconciler/common" + "github.com/kcp-dev/kcp/pkg/reconciler/util" "github.com/kcp-dev/logicalcluster/v2" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -34,13 +34,13 @@ import ( // targets a specific logical cluster type scopingSharedInformerFactory struct { factory informers.SharedInformerFactory - delegatingEventHandler *common.DelegatingEventHandler + delegatingEventHandler *util.DelegatingEventHandler } func newScopingSharedInformerFactory(factory informers.SharedInformerFactory) *scopingSharedInformerFactory { return &scopingSharedInformerFactory{ factory: factory, - 
delegatingEventHandler: common.NewDelegatingEventHandler(), + delegatingEventHandler: util.NewDelegatingEventHandler(), } } @@ -58,7 +58,7 @@ func (f *scopingSharedInformerFactory) ForCluster(clusterName logicalcluster.Nam type scopedSharedInformerFactory struct { delegate informers.SharedInformerFactory clusterName logicalcluster.Name - delegatingEventHandler *common.DelegatingEventHandler + delegatingEventHandler *util.DelegatingEventHandler } var _ informers.SharedInformerFactory = &scopedSharedInformerFactory{} diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go index cddfb8cb649..8308c9f8b2f 100644 --- a/pkg/reconciler/kubeapf/single_cluster_flow_control.go +++ b/pkg/reconciler/kubeapf/single_cluster_flow_control.go @@ -2,7 +2,7 @@ package kubeapf import ( "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/reconciler/common" + "github.com/kcp-dev/kcp/pkg/reconciler/util" "github.com/kcp-dev/logicalcluster/v2" apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" "k8s.io/apimachinery/pkg/api/errors" @@ -19,7 +19,7 @@ import ( type scopedFlowcontrol struct { clusterName logicalcluster.Name delegate flowcontrol.Interface - delegatingEventHandler *common.DelegatingEventHandler + delegatingEventHandler *util.DelegatingEventHandler } var _ flowcontrol.Interface = &scopedFlowcontrol{} @@ -46,7 +46,7 @@ func (f *scopedFlowcontrol) V1beta2() v1beta2.Interface { type scopedFlowcontrolV1Beta2 struct { clusterName logicalcluster.Name delegate v1beta2.Interface - delegatingEventHandler *common.DelegatingEventHandler + delegatingEventHandler *util.DelegatingEventHandler } var _ v1beta2.Interface = &scopedFlowcontrolV1Beta2{} @@ -72,12 +72,12 @@ func (f *scopedFlowcontrolV1Beta2) PriorityLevelConfigurations() v1beta2.Priorit type scopedFlowSchemaInformer struct { clusterName logicalcluster.Name delegate v1beta2.FlowSchemaInformer - delegatingEventHandler *common.DelegatingEventHandler + 
delegatingEventHandler *util.DelegatingEventHandler } type scopedPriorityLevelConfigurationInformer struct { clusterName logicalcluster.Name delegate v1beta2.PriorityLevelConfigurationInformer - delegatingEventHandler *common.DelegatingEventHandler + delegatingEventHandler *util.DelegatingEventHandler } var _ v1beta2.FlowSchemaInformer = &scopedFlowSchemaInformer{} @@ -85,7 +85,7 @@ var _ v1beta2.PriorityLevelConfigurationInformer = &scopedPriorityLevelConfigura // Informer implements v1beta2.FlowSchemaInformer func (s *scopedFlowSchemaInformer) Informer() cache.SharedIndexInformer { - return &common.DelegatingInformer{ + return &util.DelegatingInformer{ ClusterName: s.clusterName, Resource: apiflowcontrolv1beta2.Resource("flowschemas"), SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. single instance will be shared @@ -103,7 +103,7 @@ func (s *scopedFlowSchemaInformer) Lister() flowcontrollisterv1beta2.FlowSchemaL // Informer implements v1beta2.PriorityLevelConfigurationInformer func (s *scopedPriorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return &common.DelegatingInformer{ + return &util.DelegatingInformer{ ClusterName: s.clusterName, Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfigurations"), SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. 
single instance will be shared @@ -141,7 +141,7 @@ func (s *SingleClusterFlowSchemaLister) Get(name string) (*apiflowcontrolv1beta2 // List implements v1beta2.FlowSchemaLister func (s *SingleClusterFlowSchemaLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.FlowSchema, err error) { - if err := common.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + if err := util.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { ret = append(ret, obj.(*apiflowcontrolv1beta2.FlowSchema)) }); err != nil { return nil, err @@ -172,7 +172,7 @@ func (s *SingleClusterPriorityLevelConfigurationLister) Get(name string) (*apifl // List implements v1beta2.PriorityLevelConfigurationLister func (s *SingleClusterPriorityLevelConfigurationLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.PriorityLevelConfiguration, err error) { - if err := common.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { + if err := util.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { ret = append(ret, obj.(*apiflowcontrolv1beta2.PriorityLevelConfiguration)) }); err != nil { return nil, err From b41a0cf426bc4b3c21c6ef7389eb12fa5e8cbd80 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Tue, 18 Oct 2022 00:53:08 +0000 Subject: [PATCH 11/28] Install kcp SOCT controller --- pkg/server/controllers.go | 47 +++++++++++++++++++++++++++++++++++++++ pkg/server/server.go | 6 +++++ 2 files changed, 53 insertions(+) diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index cb1b28420cb..c4f152a1b4f 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -67,6 +67,7 @@ import ( "github.com/kcp-dev/kcp/pkg/reconciler/kubequota" schedulinglocationstatus 
"github.com/kcp-dev/kcp/pkg/reconciler/scheduling/location" schedulingplacement "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/placement" + "github.com/kcp-dev/kcp/pkg/reconciler/soct" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/bootstrap" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/clusterworkspace" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/clusterworkspacedeletion" @@ -1181,6 +1182,52 @@ func (s *Server) installKubeQuotaController( return nil } +func (s *Server) installKcpSOCTController( + ctx context.Context, + config *rest.Config, + server *genericapiserver.GenericAPIServer, +) error { + controllerName := "kcp-storage-object-count-tracker-controller" + // config = rest.CopyConfig(config) + + // config = rest.AddUserAgent(config, controllerName) + // kubeClusterClient, err := kubernetesclient.NewClusterForConfig(config) + // if err != nil { + // return err + // } + + c := soct.NewSOCTController( + s.KcpSharedInformerFactory.Tenancy().V1alpha1().ClusterWorkspaces(), + s.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions(), + s.GenericConfig.StorageObjectCountGetterRegistry, + s.GenericConfig.KcpStorageObjectCountTracker, + ) + + if err := server.AddPostStartHook(postStartHookName(controllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(controllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + // nolint:nilerr + return nil // don't klog.Fatal. This only happens when context is cancelled. 
+ } + + go c.Run(ctx, hookContext.StopCh) + + return nil + }); err != nil { + return err + } + + // if err := server.AddPreShutdownHook(controllerName, func() error { + // close(s.quotaAdmissionStopCh) + // return nil + // }); err != nil { + // return err + // } + + return nil +} + func (s *Server) installApiExportIdentityController(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { if s.Options.Extra.ShardName == tenancyv1alpha1.RootShard { return nil diff --git a/pkg/server/server.go b/pkg/server/server.go index 7a56cf97b0d..bcbc5446a3a 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -519,6 +519,12 @@ func (s *Server) Run(ctx context.Context) error { } } + if s.Options.Controllers.EnableAll { + if err := s.installKcpSOCTController(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + if s.Options.Virtual.Enabled { if err := s.installVirtualWorkspaces(ctx, controllerConfig, delegationChainHead, s.GenericConfig.Authentication, s.GenericConfig.ExternalAddress, s.GenericConfig.AuditPolicyRuleEvaluator, s.preHandlerChainMux); err != nil { return err From cb028cf5e5094e4fc8620510dd751b5e2207c8ef Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Wed, 19 Oct 2022 00:42:23 +0000 Subject: [PATCH 12/28] Use dynamic discovery shared informer for SOCT controller --- pkg/reconciler/soct/controller.go | 138 ++++++++++++------------------ pkg/server/controllers.go | 15 ++-- 2 files changed, 65 insertions(+), 88 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 33ce766ed27..1e85a9e21a0 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -5,17 +5,16 @@ import ( "fmt" "time" - apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" tenancyinformers 
"github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/informer" "github.com/kcp-dev/kcp/pkg/logging" "github.com/kcp-dev/logicalcluster/v2" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" + kubernetesclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clusters" "k8s.io/client-go/util/workqueue" @@ -30,45 +29,36 @@ const SOCTControllerName = "kcp-storage-object-count-tracker-controller" // observer goroutines to udpate the tracker object counts. It also acts as a multiplexer // for API request-driven queries to retrieve the latest resource object count numbers. type SOCTController struct { - cwQueue workqueue.RateLimitingInterface - crdQueue workqueue.RateLimitingInterface - getterRegistry flowcontrolrequest.StorageObjectCountGetterRegistry - tracker flowcontrolrequest.KcpStorageObjectCountTracker + dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory + cwQueue workqueue.RateLimitingInterface + getterRegistry flowcontrolrequest.StorageObjectCountGetterRegistry + tracker flowcontrolrequest.KcpStorageObjectCountTracker getClusterWorkspace func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) - getCRD func(key string) (*apiextensionsv1.CustomResourceDefinition, error) } // NewSOCTController func NewSOCTController( + kubeClusterClient *kubernetesclient.Cluster, clusterWorkspacesInformer tenancyinformers.ClusterWorkspaceInformer, - crdInformer apiextensionsinformers.CustomResourceDefinitionInformer, + dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory, getterRegistry 
flowcontrolrequest.StorageObjectCountGetterRegistry, tracker flowcontrolrequest.KcpStorageObjectCountTracker, ) *SOCTController { c := &SOCTController{ - cwQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - crdQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - - getterRegistry: getterRegistry, - tracker: tracker, + dynamicDiscoverySharedInformerFactory: dynamicDiscoverySharedInformerFactory, + cwQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + getterRegistry: getterRegistry, + tracker: tracker, getClusterWorkspace: func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) { return clusterWorkspacesInformer.Lister().Get(key) }, - getCRD: func(key string) (*apiextensionsv1.CustomResourceDefinition, error) { - return crdInformer.Lister().Get(key) - }, } clusterWorkspacesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.enqueueClusterWorkspace, DeleteFunc: c.enqueueClusterWorkspace, }) - - crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueueCrd, - DeleteFunc: c.enqueueCrd, - }) return c } @@ -83,29 +73,16 @@ func (c *SOCTController) enqueueClusterWorkspace(obj interface{}) { c.cwQueue.Add(key) } -func (c *SOCTController) enqueueCrd(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), SOCTControllerName), key) - logger.V(2).Info("queueing CRD") - c.crdQueue.Add(key) -} - // Run starts the SOCT controller. 
It needs to start updating // counters in trackers for APF to work func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { defer runtime.HandleCrash() defer c.cwQueue.ShutDown() - defer c.crdQueue.ShutDown() klog.Infof("Starting %s controller", SOCTControllerName) defer klog.Infof("Shutting down %s controller", SOCTControllerName) go wait.Until(func() { c.runClusterWorkspaceWorker(ctx) }, time.Second, stop) - go wait.Until(func() { c.runCrdWorker(ctx) }, time.Second, stop) <-stop } @@ -115,11 +92,6 @@ func (c *SOCTController) runClusterWorkspaceWorker(ctx context.Context) { } } -func (c *SOCTController) runCrdWorker(ctx context.Context) { - for c.processNext(ctx, c.crdQueue, c.processCrd) { - } -} - func (c *SOCTController) processNext( ctx context.Context, queue workqueue.RateLimitingInterface, @@ -134,7 +106,7 @@ func (c *SOCTController) processNext( logger := logging.WithQueueKey(klog.FromContext(ctx), key) ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("soct processing key") + logger.V(1).Info("SOCT-processing-key") // No matter what, tell the queue we're done with this key, to unblock // other workers. 
@@ -149,43 +121,6 @@ func (c *SOCTController) processNext( return true } -func getBoundCluster(crd *apiextensionsv1.CustomResourceDefinition) string { - cluster, ok := crd.GetAnnotations()[apisv1alpha1.AnnotationSchemaClusterKey] - if !ok { - return "" - } - return cluster -} - -func getResource(crd *apiextensionsv1.CustomResourceDefinition) string { - return crd.Spec.Names.Plural -} - -func (c *SOCTController) processCrd(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - - crd, err := c.getCRD(key) - cluster := getBoundCluster(crd) - resource := getResource(crd) - - logger = logging.WithObject(logger, crd) - if err != nil { - if kerrors.IsNotFound(err) { - logger.V(2).Info("CRD not found - stopping observer for it (if needed)") - c.tracker.StopObserving(cluster, resource) - return nil - } - return err - } - logger.V(2).Info("New CRD - starting observer for it (if needed)") - c.tracker.StartObserving(cluster, resource, - func() int64 { - return c.getterRegistry.GetObjectCount(logicalcluster.New(cluster), resource) - }, - ) - return nil -} - func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string) error { logger := klog.FromContext(ctx) // e.g. 
root:orgws @@ -208,8 +143,49 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string logger = logging.WithObject(logger, ws) c.tracker.CreateTracker(clusterNameStr) logger.V(2).Info("Cluster tracker started") - // TODO: start observer goroutines - // for all api-resources in the logical cluster - + // c.updateObservers(ctx, clusterName) + + // TODO: start a goroutine to subscribe to changes in API + apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterName.String()) + go func() { + var discoveryCancel func() + + for { + select { + case <-ctx.Done(): + if discoveryCancel != nil { + discoveryCancel() + } + + return + case <-apisChanged: + if discoveryCancel != nil { + discoveryCancel() + } + + logger.V(4).Info("got API change notification") + + ctx, discoveryCancel = context.WithCancel(ctx) + c.updateObservers(ctx, clusterName) + } + } + }() return nil } + +func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { + // Start observer goroutines for all api resources in the logical cluster + listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() + // StartObserving might be called multiple times for the same resource + // subsequent calls will be ignored + for gvr := range listers { + c.tracker.StartObserving(cluster.String(), gvr.String(), + func() int64 { return c.getterRegistry.GetObjectCount(cluster, gvr.String()) }, + ) + } + for _, gvr := range notSynced { + c.tracker.StartObserving(cluster.String(), gvr.String(), + func() int64 { return c.getterRegistry.GetObjectCount(cluster, gvr.String()) }, + ) + } +} diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index c4f152a1b4f..6ff6938a82c 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -1188,17 +1188,18 @@ func (s *Server) installKcpSOCTController( server *genericapiserver.GenericAPIServer, ) error { controllerName := "kcp-storage-object-count-tracker-controller" - 
// config = rest.CopyConfig(config) + config = rest.CopyConfig(config) - // config = rest.AddUserAgent(config, controllerName) - // kubeClusterClient, err := kubernetesclient.NewClusterForConfig(config) - // if err != nil { - // return err - // } + config = rest.AddUserAgent(config, controllerName) + kubeClusterClient, err := kubernetesclient.NewClusterForConfig(config) + if err != nil { + return err + } c := soct.NewSOCTController( + kubeClusterClient, s.KcpSharedInformerFactory.Tenancy().V1alpha1().ClusterWorkspaces(), - s.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions(), + s.DynamicDiscoverySharedInformerFactory, s.GenericConfig.StorageObjectCountGetterRegistry, s.GenericConfig.KcpStorageObjectCountTracker, ) From 550c2eb537e4f02eaa5c8d4a4b7b840e09b6f09d Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Wed, 19 Oct 2022 00:50:43 +0000 Subject: [PATCH 13/28] Add unsubscribe call --- pkg/reconciler/soct/controller.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 1e85a9e21a0..5ace6d4772d 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -135,6 +135,8 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string if err != nil { if kerrors.IsNotFound(err) { logger.V(2).Info("ClusterWorkspace not found - deleting tracker") + c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterName.String()) + // FIXME: should also stop discovery threads c.tracker.DeleteTracker(clusterNameStr) return nil } From d8a19dd924c47ea1679b062e18b5ad14c7dbde05 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Wed, 19 Oct 2022 14:10:19 +0000 Subject: [PATCH 14/28] Use GroupResource string instead of GVR for consistency with GetterRegistry --- pkg/reconciler/soct/controller.go | 15 +++++++++------ 1 file changed, 
9 insertions(+), 6 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 5ace6d4772d..5091ed1f1f2 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -135,7 +135,7 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string if err != nil { if kerrors.IsNotFound(err) { logger.V(2).Info("ClusterWorkspace not found - deleting tracker") - c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterName.String()) + c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterNameStr) // FIXME: should also stop discovery threads c.tracker.DeleteTracker(clusterNameStr) return nil @@ -148,7 +148,7 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string // c.updateObservers(ctx, clusterName) // TODO: start a goroutine to subscribe to changes in API - apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterName.String()) + apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) go func() { var discoveryCancel func() @@ -178,16 +178,19 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { // Start observer goroutines for all api resources in the logical cluster listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() + // StartObserving might be called multiple times for the same resource // subsequent calls will be ignored for gvr := range listers { - c.tracker.StartObserving(cluster.String(), gvr.String(), - func() int64 { return c.getterRegistry.GetObjectCount(cluster, gvr.String()) }, + resourceName := gvr.GroupResource().String() + c.tracker.StartObserving(cluster.String(), resourceName, + func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, ) } for _, gvr := range notSynced { - 
c.tracker.StartObserving(cluster.String(), gvr.String(), - func() int64 { return c.getterRegistry.GetObjectCount(cluster, gvr.String()) }, + resourceName := gvr.GroupResource().String() + c.tracker.StartObserving(cluster.String(), resourceName, + func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, ) } } From 1b24fc5fe10d66cea04083920e429703cefacb8e Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:36:26 +0000 Subject: [PATCH 15/28] Add placeholder for creating trackers of default logical clusters --- pkg/reconciler/soct/controller.go | 36 +++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 5091ed1f1f2..8f586f648c9 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -82,6 +82,13 @@ func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { klog.Infof("Starting %s controller", SOCTControllerName) defer klog.Infof("Shutting down %s controller", SOCTControllerName) + // Start trackers of default clusters + defaultClusters := []logicalcluster.Name{logicalcluster.New("root")} + for _, cluster := range defaultClusters { + c.startClusterTracker(ctx, cluster) + defer c.stopClusterTracker(ctx, cluster) + } + go wait.Until(func() { c.runClusterWorkspaceWorker(ctx) }, time.Second, stop) <-stop @@ -128,24 +135,27 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string // turn it into root:org:ws clusterName := parent.Join(name) - clusterNameStr := clusterName.String() ws, err := c.getClusterWorkspace(key) - logger = logger.WithValues("logicalCluster", clusterNameStr) + logger = logger.WithValues("logicalCluster", clusterName.String()) logger.V(2).Info("processClusterWorkspace called") if err != nil { if kerrors.IsNotFound(err) { logger.V(2).Info("ClusterWorkspace not found - deleting 
tracker") - c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterNameStr) - // FIXME: should also stop discovery threads - c.tracker.DeleteTracker(clusterNameStr) + c.stopClusterTracker(ctx, clusterName) return nil } return err } logger = logging.WithObject(logger, ws) - c.tracker.CreateTracker(clusterNameStr) + c.startClusterTracker(ctx, clusterName) logger.V(2).Info("Cluster tracker started") - // c.updateObservers(ctx, clusterName) + + return nil +} + +func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName logicalcluster.Name) { + clusterNameStr := clusterName.String() + c.tracker.CreateTracker(clusterNameStr) // TODO: start a goroutine to subscribe to changes in API apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) @@ -164,15 +174,19 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string if discoveryCancel != nil { discoveryCancel() } - - logger.V(4).Info("got API change notification") - + // logger.V(4).Info("got API change notification") ctx, discoveryCancel = context.WithCancel(ctx) c.updateObservers(ctx, clusterName) } } }() - return nil +} + +func (c *SOCTController) stopClusterTracker(ctx context.Context, clusterName logicalcluster.Name) { + clusterNameStr := clusterName.String() + c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterNameStr) + // FIXME: should also stop discovery threads + c.tracker.DeleteTracker(clusterNameStr) } func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { From 55c0cd215293c38c74b9ea205d2cbf2b5e6bdd0b Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:45:35 +0000 Subject: [PATCH 16/28] Add the list of default logical clusters --- pkg/reconciler/soct/controller.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/reconciler/soct/controller.go 
b/pkg/reconciler/soct/controller.go index 8f586f648c9..2c6887b98aa 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -83,7 +83,14 @@ func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { defer klog.Infof("Shutting down %s controller", SOCTControllerName) // Start trackers of default clusters - defaultClusters := []logicalcluster.Name{logicalcluster.New("root")} + defaultClusters := []logicalcluster.Name{ + logicalcluster.New("root"), + logicalcluster.New("system:system-crds"), + logicalcluster.New("system:shard"), + logicalcluster.New("system:admin"), + logicalcluster.New("system:bound-crds"), + logicalcluster.New("root:compute"), + } for _, cluster := range defaultClusters { c.startClusterTracker(ctx, cluster) defer c.stopClusterTracker(ctx, cluster) From 0ef41d161e7747d7e52b2affc5bca0ca66c46698 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Fri, 21 Oct 2022 20:57:14 +0000 Subject: [PATCH 17/28] changes for using context --- pkg/reconciler/soct/controller.go | 25 +++++++++++++++++-------- pkg/server/controllers.go | 9 +-------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 2c6887b98aa..b8a2cd780f1 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -75,12 +75,14 @@ func (c *SOCTController) enqueueClusterWorkspace(obj interface{}) { // Run starts the SOCT controller. 
It needs to start updating // counters in trackers for APF to work -func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { +func (c *SOCTController) Run(ctx context.Context) { defer runtime.HandleCrash() defer c.cwQueue.ShutDown() - klog.Infof("Starting %s controller", SOCTControllerName) - defer klog.Infof("Shutting down %s controller", SOCTControllerName) + logger := logging.WithReconciler(klog.FromContext(ctx), SOCTControllerName) + ctx = klog.NewContext(ctx, logger) + logger.Info("Starting controller") + defer logger.Info("Shutting down controller") // Start trackers of default clusters defaultClusters := []logicalcluster.Name{ @@ -92,13 +94,14 @@ func (c *SOCTController) Run(ctx context.Context, stop <-chan struct{}) { logicalcluster.New("root:compute"), } for _, cluster := range defaultClusters { + logger.Info("Starting storage object count tracker for logical cluster", "clusterName", cluster) c.startClusterTracker(ctx, cluster) defer c.stopClusterTracker(ctx, cluster) } - go wait.Until(func() { c.runClusterWorkspaceWorker(ctx) }, time.Second, stop) + go wait.UntilWithContext(ctx, c.runClusterWorkspaceWorker, time.Second) - <-stop + <-ctx.Done() } func (c *SOCTController) runClusterWorkspaceWorker(ctx context.Context) { @@ -162,9 +165,13 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName logicalcluster.Name) { clusterNameStr := clusterName.String() - c.tracker.CreateTracker(clusterNameStr) - // TODO: start a goroutine to subscribe to changes in API + // Pass the global context in for the cluster specific tracker created to catch global ctx.Done + // so that the pruning and the observer goroutines can be gracefully terminated if global context + // is cancelled + c.tracker.CreateTracker(ctx, clusterNameStr) + + // Start a goroutine to subscribe to changes in API apisChanged := 
c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) go func() { var discoveryCancel func() @@ -182,7 +189,7 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo discoveryCancel() } // logger.V(4).Info("got API change notification") - ctx, discoveryCancel = context.WithCancel(ctx) + ctx, discoveryCancel = context.WithCancel(ctx) // TODO: fix the usage of contexts c.updateObservers(ctx, clusterName) } } @@ -200,6 +207,8 @@ func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalclu // Start observer goroutines for all api resources in the logical cluster listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() + // TODO: should pass context into start and stop observer functions + // StartObserving might be called multiple times for the same resource // subsequent calls will be ignored for gvr := range listers { diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index 6ff6938a82c..06830334995 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -1212,20 +1212,13 @@ func (s *Server) installKcpSOCTController( return nil // don't klog.Fatal. This only happens when context is cancelled. 
} - go c.Run(ctx, hookContext.StopCh) + go c.Run(util.GoContext(hookContext)) return nil }); err != nil { return err } - // if err := server.AddPreShutdownHook(controllerName, func() error { - // close(s.quotaAdmissionStopCh) - // return nil - // }); err != nil { - // return err - // } - return nil } From 99302defb681e04578f4c1c8a74723bb3b0d5abf Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Mon, 24 Oct 2022 01:50:17 +0000 Subject: [PATCH 18/28] refactor for apischanged context usage --- pkg/reconciler/soct/controller.go | 67 +++++++++++++++++++------------ 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index b8a2cd780f1..0c438bbac1a 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -173,6 +173,7 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo // Start a goroutine to subscribe to changes in API apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) + go func() { var discoveryCancel func() @@ -182,15 +183,31 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo if discoveryCancel != nil { discoveryCancel() } - return case <-apisChanged: if discoveryCancel != nil { discoveryCancel() } - // logger.V(4).Info("got API change notification") - ctx, discoveryCancel = context.WithCancel(ctx) // TODO: fix the usage of contexts - c.updateObservers(ctx, clusterName) + apisChangedCtx, cancelFunc := context.WithCancel(ctx) + discoveryCancel = cancelFunc + // c.updateObservers(apisChangedCtx, clusterName) + listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() + resourceNames := make([]string, len(listers)+len(notSynced)) + getterFuncs := make([]func() int64, len(listers)+len(notSynced)) + i := 0 + for gvr := range listers { + resourceName := gvr.GroupResource().String() + resourceNames[i] 
= resourceName + getterFuncs[i] = func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) } + i++ + } + for _, gvr := range notSynced { + resourceName := gvr.GroupResource().String() + resourceNames[i] = resourceName + getterFuncs[i] = func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) } + i++ + } + c.tracker.UpdateObservers(apisChangedCtx, clusterName.String(), resourceNames, getterFuncs) } } }() @@ -203,24 +220,24 @@ func (c *SOCTController) stopClusterTracker(ctx context.Context, clusterName log c.tracker.DeleteTracker(clusterNameStr) } -func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { - // Start observer goroutines for all api resources in the logical cluster - listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() - - // TODO: should pass context into start and stop observer functions - - // StartObserving might be called multiple times for the same resource - // subsequent calls will be ignored - for gvr := range listers { - resourceName := gvr.GroupResource().String() - c.tracker.StartObserving(cluster.String(), resourceName, - func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, - ) - } - for _, gvr := range notSynced { - resourceName := gvr.GroupResource().String() - c.tracker.StartObserving(cluster.String(), resourceName, - func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, - ) - } -} +// func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { +// // Start observer goroutines for all api resources in the logical cluster +// listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() + +// // TODO: should pass context into start and stop observer functions + +// // StartObserving might be called multiple times for the same resource +// // subsequent calls will be ignored +// for gvr := range listers { +// resourceName := 
gvr.GroupResource().String() +// c.tracker.StartObserving(ctx, cluster.String(), resourceName, +// func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, +// ) +// } +// for _, gvr := range notSynced { +// resourceName := gvr.GroupResource().String() +// c.tracker.StartObserving(ctx, cluster.String(), resourceName, +// func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, +// ) +// } +// } From 55dd49a33839b6d631c8e8e634008fcb84f0514c Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 27 Oct 2022 18:38:00 +0000 Subject: [PATCH 19/28] refactoring SOCT controller --- pkg/reconciler/soct/controller.go | 41 ++++++------------------------- 1 file changed, 8 insertions(+), 33 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 0c438bbac1a..ae85c9aea16 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -190,24 +190,21 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo } apisChangedCtx, cancelFunc := context.WithCancel(ctx) discoveryCancel = cancelFunc - // c.updateObservers(apisChangedCtx, clusterName) + listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() - resourceNames := make([]string, len(listers)+len(notSynced)) - getterFuncs := make([]func() int64, len(listers)+len(notSynced)) - i := 0 + for gvr := range listers { resourceName := gvr.GroupResource().String() - resourceNames[i] = resourceName - getterFuncs[i] = func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) } - i++ + c.tracker.StartObserving(apisChangedCtx, clusterNameStr, resourceName, + func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) }, + ) } for _, gvr := range notSynced { resourceName := gvr.GroupResource().String() - resourceNames[i] = resourceName - getterFuncs[i] = func() int64 { return 
c.getterRegistry.GetObjectCount(clusterName, resourceName) } - i++ + c.tracker.StartObserving(apisChangedCtx, clusterNameStr, resourceName, + func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) }, + ) } - c.tracker.UpdateObservers(apisChangedCtx, clusterName.String(), resourceNames, getterFuncs) } } }() @@ -219,25 +216,3 @@ func (c *SOCTController) stopClusterTracker(ctx context.Context, clusterName log // FIXME: should also stop discovery threads c.tracker.DeleteTracker(clusterNameStr) } - -// func (c *SOCTController) updateObservers(ctx context.Context, cluster logicalcluster.Name) { -// // Start observer goroutines for all api resources in the logical cluster -// listers, notSynced := c.dynamicDiscoverySharedInformerFactory.Listers() - -// // TODO: should pass context into start and stop observer functions - -// // StartObserving might be called multiple times for the same resource -// // subsequent calls will be ignored -// for gvr := range listers { -// resourceName := gvr.GroupResource().String() -// c.tracker.StartObserving(ctx, cluster.String(), resourceName, -// func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, -// ) -// } -// for _, gvr := range notSynced { -// resourceName := gvr.GroupResource().String() -// c.tracker.StartObserving(ctx, cluster.String(), resourceName, -// func() int64 { return c.getterRegistry.GetObjectCount(cluster, resourceName) }, -// ) -// } -// } From 34b14e150b58ae39b374b5273d2b92db085c229a Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Fri, 28 Oct 2022 18:15:15 +0000 Subject: [PATCH 20/28] Enable kcp apf watch tracker --- pkg/reconciler/kubeapf/kube_apf.go | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index 500740d6055..a05a9d17868 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ 
b/pkg/reconciler/kubeapf/kube_apf.go @@ -41,11 +41,11 @@ type KubeApfDelegator struct { stopCh <-chan struct{} - utilflowcontrol.WatchTracker + utilflowcontrol.KcpWatchTracker } // Make sure utilflowcontrol.Interface is implemented -var _ utilflowcontrol.Interface = &KubeApfDelegator{} +// var _ utilflowcontrol.Interface = &KubeApfDelegator{} // var defaultCluster logicalcluster.Name = logicalcluster.Wildcard @@ -62,7 +62,7 @@ func NewKubeApfDelegator( serverConcurrencyLimit: serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, - WatchTracker: utilflowcontrol.NewWatchTracker(), + KcpWatchTracker: utilflowcontrol.NewKcpWatchTracker(), } } @@ -83,18 +83,6 @@ func (k *KubeApfDelegator) Handle(ctx context.Context, delegate.Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) } -// // GetInterestedWatchCount implements flowcontrol.Interface -// func (k *KubeApfDelegator) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { -// // FIXME: Figure out the right way to implement WatchTracker -// return k.delegates[defaultCluster].GetInterestedWatchCount(requestInfo) -// } - -// // RegisterWatch implements flowcontrol.Interface -// func (k *KubeApfDelegator) RegisterWatch(r *http.Request) utilflowcontrol.ForgetWatchFunc { -// // FIXME: Figure out the right way to implement WatchTracker -// return k.delegates[defaultCluster].RegisterWatch(r) -// } - // Install implements flowcontrol.Interface func (k *KubeApfDelegator) Install(c *mux.PathRecorderMux) { // k.pathRecorderMux = c // store the reference for Install later From ddd59dc6f17c0272a417dd603ca6c15ca676b47e Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 3 Nov 2022 20:34:18 +0000 Subject: [PATCH 21/28] refactoring --- pkg/reconciler/kubeapf/kube_apf.go | 3 +++ pkg/reconciler/soct/controller.go | 23 +++++++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) 
diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index a05a9d17868..225f142fbc1 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -66,6 +66,8 @@ func NewKubeApfDelegator( } } +// TODO: monitor ClusterWorkspace Deletes and remove corresponding delegate + // Handle implements flowcontrol.Interface func (k *KubeApfDelegator) Handle(ctx context.Context, requestDigest utilflowcontrol.RequestDigest, @@ -136,6 +138,7 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) k.delegates[clusterName] = delegate // Start cluster scoped apf controller + // TODO(cyang49): There should be life cycle management for cluster specific delegate go delegate.MaintainObservations(k.stopCh) // FIXME: Metric observations need to work per-cluster --> beware of metrics explosion go delegate.Run(k.stopCh) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index ae85c9aea16..2d7eddec16d 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -95,8 +95,8 @@ func (c *SOCTController) Run(ctx context.Context) { } for _, cluster := range defaultClusters { logger.Info("Starting storage object count tracker for logical cluster", "clusterName", cluster) - c.startClusterTracker(ctx, cluster) - defer c.stopClusterTracker(ctx, cluster) + c.startClusterWorkspaceTracker(ctx, cluster) + defer c.stopClusterWorkspaceTracker(ctx, cluster) } go wait.UntilWithContext(ctx, c.runClusterWorkspaceWorker, time.Second) @@ -151,25 +151,25 @@ func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string if err != nil { if kerrors.IsNotFound(err) { logger.V(2).Info("ClusterWorkspace not found - deleting tracker") - c.stopClusterTracker(ctx, clusterName) + c.stopClusterWorkspaceTracker(ctx, clusterName) return nil } return err } logger = logging.WithObject(logger, ws) - c.startClusterTracker(ctx, clusterName) + 
c.startClusterWorkspaceTracker(ctx, clusterName) logger.V(2).Info("Cluster tracker started") return nil } -func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName logicalcluster.Name) { +func (c *SOCTController) startClusterWorkspaceTracker(ctx context.Context, clusterName logicalcluster.Name) { clusterNameStr := clusterName.String() // Pass the global context in for the cluster specific tracker created to catch global ctx.Done // so that the pruning and the observer goroutines can be gracefully terminated if global context // is cancelled - c.tracker.CreateTracker(ctx, clusterNameStr) + c.tracker.CreateClusterSpecificTracker(ctx, clusterNameStr) // Start a goroutine to subscribe to changes in API apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) @@ -195,13 +195,13 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo for gvr := range listers { resourceName := gvr.GroupResource().String() - c.tracker.StartObserving(apisChangedCtx, clusterNameStr, resourceName, + c.tracker.EnsureObserving(apisChangedCtx, clusterNameStr, resourceName, func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) }, ) } - for _, gvr := range notSynced { + for _, gvr := range notSynced { // TODO: confirm with Andy and Steve whether this is needed resourceName := gvr.GroupResource().String() - c.tracker.StartObserving(apisChangedCtx, clusterNameStr, resourceName, + c.tracker.EnsureObserving(apisChangedCtx, clusterNameStr, resourceName, func() int64 { return c.getterRegistry.GetObjectCount(clusterName, resourceName) }, ) } @@ -210,9 +210,8 @@ func (c *SOCTController) startClusterTracker(ctx context.Context, clusterName lo }() } -func (c *SOCTController) stopClusterTracker(ctx context.Context, clusterName logicalcluster.Name) { +func (c *SOCTController) stopClusterWorkspaceTracker(ctx context.Context, clusterName logicalcluster.Name) { clusterNameStr := clusterName.String() 
c.dynamicDiscoverySharedInformerFactory.Unsubscribe("soct-" + clusterNameStr) - // FIXME: should also stop discovery threads - c.tracker.DeleteTracker(clusterNameStr) + c.tracker.DeleteClusterSpecificTracker(clusterNameStr) } From 557e8cdeee94c146e59d478c3e232d257a46705e Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Fri, 4 Nov 2022 15:09:39 +0000 Subject: [PATCH 22/28] refactor kube apf delegator --- pkg/reconciler/kubeapf/kube_apf.go | 68 +++++++++++++++++++----------- 1 file changed, 43 insertions(+), 25 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index 225f142fbc1..d3be137c738 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -32,13 +32,17 @@ type KubeApfDelegator struct { serverConcurrencyLimit int requestWaitLimit time.Duration - // delegates are the references to cluster scoped apf controllers - delegates map[logicalcluster.Name]utilflowcontrol.Interface - lock sync.RWMutex + // delegates are the references to cluster specific apf controllers + delegates map[logicalcluster.Name]utilflowcontrol.Interface + // delegateStopChs are cluster specific stopChs that can be used + // to stop single delegate when its corresponding ClusterWorkspace + // is removed + delegateStopChs map[logicalcluster.Name]chan struct{} pathRecorderMux *mux.PathRecorderMux + // stopCh lets delegator receive stop signal from outside stopCh <-chan struct{} utilflowcontrol.KcpWatchTracker @@ -47,8 +51,6 @@ type KubeApfDelegator struct { // Make sure utilflowcontrol.Interface is implemented // var _ utilflowcontrol.Interface = &KubeApfDelegator{} -// var defaultCluster logicalcluster.Name = logicalcluster.Wildcard - // NewKubeApfDelegator func NewKubeApfDelegator( informerFactory kubeinformers.SharedInformerFactory, @@ -62,6 +64,7 @@ func NewKubeApfDelegator( serverConcurrencyLimit: serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, 
delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, + delegateStopChs: map[logicalcluster.Name]chan struct{}{}, KcpWatchTracker: utilflowcontrol.NewKcpWatchTracker(), } } @@ -76,33 +79,39 @@ func (k *KubeApfDelegator) Handle(ctx context.Context, queueNoteFn fq.QueueNoteFn, execFn func(), ) { - // TODO: missing error handling cluster, _ := genericapirequest.ValidClusterFrom(ctx) klog.V(3).InfoS("KubeApfFilter Handle request for cluster ", "clusterName", cluster.Name) delegate, _ := k.getOrCreateDelegate(cluster.Name) - delegate.Handle(ctx, requestDigest, noteFn, workEstimator, queueNoteFn, execFn) } // Install implements flowcontrol.Interface func (k *KubeApfDelegator) Install(c *mux.PathRecorderMux) { - // k.pathRecorderMux = c // store the reference for Install later - // Do nothing for now + k.pathRecorderMux = c // store the reference for Install later // FIXME } -// MaintainObservations implements flowcontrol.Interface +// MaintainObservations see comments for the Run function func (k *KubeApfDelegator) MaintainObservations(stopCh <-chan struct{}) { - // TODO: make sure MaintainObservations implementation works with clusters - k.stopCh = stopCh + k.lock.Lock() + if k.stopCh == nil { + k.stopCh = stopCh + } + k.lock.Unlock() + // Block waiting only so that it behaves similarly to cfgCtlr + <-stopCh } -// Run implements flowcontrol.Interface -// The delegator doesn't actually call apf controller Run here -// It stores the stopCh for later use when the cluster scoped -// apf controllers are created +// Run doesn't actually call Run functions of delegates directly +// It stores the stopCh for later use when the cluster specific delegates are created func (k *KubeApfDelegator) Run(stopCh <-chan struct{}) error { - k.stopCh = stopCh + k.lock.Lock() + if k.stopCh == nil { + k.stopCh = stopCh + } + k.lock.Unlock() + // Block waiting only so that it behaves similarly to cfgCtlr + <-stopCh return nil } @@ -124,6 +133,15 @@ func (k *KubeApfDelegator) 
getOrCreateDelegate(clusterName logicalcluster.Name) return delegate, nil } + delegateStopCh := make(chan struct{}) + go func() { + select { + case <-k.stopCh: + close(delegateStopCh) + case <-delegateStopCh: + } + }() + // New delegate uses cluster scoped informer factory and flowcontrol clients scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(clusterName) flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() @@ -133,17 +151,17 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) k.serverConcurrencyLimit, k.requestWaitLimit, ) - - scopedInformerFactory.Start(k.stopCh) - + scopedInformerFactory.Start(delegateStopCh) k.delegates[clusterName] = delegate - // Start cluster scoped apf controller - // TODO(cyang49): There should be life cycle management for cluster specific delegate - go delegate.MaintainObservations(k.stopCh) // FIXME: Metric observations need to work per-cluster --> beware of metrics explosion - go delegate.Run(k.stopCh) + k.delegateStopChs[clusterName] = delegateStopCh + // TODO: can Unlock here? 
+ + // Run cluster specific apf controller + go delegate.MaintainObservations(delegateStopCh) // FIXME: Metric observations need to work per-cluster --> beware of metrics explosion + go delegate.Run(delegateStopCh) // TODO: need to install per-cluster debug endpoint - delegate.Install(k.pathRecorderMux) + delegate.Install(k.pathRecorderMux) // FIXME: this is nil klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) return delegate, nil From a9be0b6e8064c870dc4a6f90c548b02d076bf573 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Fri, 4 Nov 2022 16:01:01 +0000 Subject: [PATCH 23/28] Add logic for kubeApfDelegator to watch clusterWorkspace deletions --- pkg/reconciler/kubeapf/kube_apf.go | 115 ++++++++++++++++++++++++++--- pkg/server/config.go | 24 +++--- 2 files changed, 117 insertions(+), 22 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index d3be137c738..afafc133922 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -2,22 +2,33 @@ package kubeapf import ( "context" + "fmt" "sync" "time" + tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" + tenancyinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/logging" "github.com/kcp-dev/logicalcluster/v2" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - flowcontrol "k8s.io/api/flowcontrol/v1beta2" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/server/mux" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" + kubeinformers "k8s.io/client-go/informers" + 
"k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clusters" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" ) +const KubeApfDelegatorName = "kcp-kube-apf-delegator" + // KubeApfDelegator implements k8s APF controller interface // it is cluster-aware and manages the life cycles of // cluster-specific APF controller instances and delegates @@ -32,6 +43,8 @@ type KubeApfDelegator struct { serverConcurrencyLimit int requestWaitLimit time.Duration + cwQueue workqueue.RateLimitingInterface + lock sync.RWMutex // delegates are the references to cluster specific apf controllers delegates map[logicalcluster.Name]utilflowcontrol.Interface @@ -46,6 +59,8 @@ type KubeApfDelegator struct { stopCh <-chan struct{} utilflowcontrol.KcpWatchTracker + + getClusterWorkspace func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) } // Make sure utilflowcontrol.Interface is implemented @@ -55,22 +70,29 @@ type KubeApfDelegator struct { func NewKubeApfDelegator( informerFactory kubeinformers.SharedInformerFactory, kubeCluster kubernetes.ClusterInterface, + clusterWorkspacesInformer tenancyinformers.ClusterWorkspaceInformer, serverConcurrencyLimit int, requestWaitLimit time.Duration, ) *KubeApfDelegator { - return &KubeApfDelegator{ + k := &KubeApfDelegator{ scopingSharedInformerFactory: newScopingSharedInformerFactory(informerFactory), // not cluster scoped kubeCluster: kubeCluster, // can be made cluster scoped serverConcurrencyLimit: serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, + cwQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), delegates: map[logicalcluster.Name]utilflowcontrol.Interface{}, delegateStopChs: map[logicalcluster.Name]chan struct{}{}, KcpWatchTracker: utilflowcontrol.NewKcpWatchTracker(), + getClusterWorkspace: func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) { + return clusterWorkspacesInformer.Lister().Get(key) + }, } + 
clusterWorkspacesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + DeleteFunc: k.enqueueClusterWorkspace, + }) + return k } -// TODO: monitor ClusterWorkspace Deletes and remove corresponding delegate - // Handle implements flowcontrol.Interface func (k *KubeApfDelegator) Handle(ctx context.Context, requestDigest utilflowcontrol.RequestDigest, @@ -91,7 +113,8 @@ func (k *KubeApfDelegator) Install(c *mux.PathRecorderMux) { k.pathRecorderMux = c // store the reference for Install later // FIXME } -// MaintainObservations see comments for the Run function +// MaintainObservations doesn't actually call MaintainObservations functions of delegates directly +// It stores the stopCh for later use func (k *KubeApfDelegator) MaintainObservations(stopCh <-chan struct{}) { k.lock.Lock() if k.stopCh == nil { @@ -102,15 +125,16 @@ func (k *KubeApfDelegator) MaintainObservations(stopCh <-chan struct{}) { <-stopCh } -// Run doesn't actually call Run functions of delegates directly -// It stores the stopCh for later use when the cluster specific delegates are created +// Run starts a goroutine to watch ClusterWorkspace deletions func (k *KubeApfDelegator) Run(stopCh <-chan struct{}) error { k.lock.Lock() if k.stopCh == nil { k.stopCh = stopCh } k.lock.Unlock() - // Block waiting only so that it behaves similarly to cfgCtlr + + go wait.Until(k.runClusterWorkspaceWorker, time.Second, stopCh) + <-stopCh return nil } @@ -166,3 +190,72 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) klog.V(3).InfoS("Started new apf controller for cluster", "clusterName", clusterName) return delegate, nil } + +func (k *KubeApfDelegator) enqueueClusterWorkspace(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + runtime.HandleError(err) + return + } + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), KubeApfDelegatorName), key) + logger.V(2).Info("queueing ClusterWorkspace") + 
k.cwQueue.Add(key) +} + +func (k *KubeApfDelegator) runClusterWorkspaceWorker() { + for k.processNext(k.cwQueue, k.processClusterWorkspace) { + } +} + +func (c *KubeApfDelegator) processNext( + queue workqueue.RateLimitingInterface, + processFunc func(key string) error, +) bool { + // Wait until there is a new item in the working queue + k, quit := queue.Get() + if quit { + return false + } + key := k.(string) + + // No matter what, tell the queue we're done with this key, to unblock + // other workers. + defer queue.Done(key) + + if err := processFunc(key); err != nil { + runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", KubeApfDelegatorName, key, err)) + queue.AddRateLimited(key) + return true + } + queue.Forget(key) + return true +} + +func (k *KubeApfDelegator) processClusterWorkspace(key string) error { + // e.g. root:orgws + parent, name := clusters.SplitClusterAwareKey(key) + + // turn it into root:org:ws + clusterName := parent.Join(name) + _, err := k.getClusterWorkspace(key) + if err != nil { + if kerrors.IsNotFound(err) { + k.stopAndRemoveDelegate(clusterName) + return nil + } + return err + } + return nil +} + +func (k *KubeApfDelegator) stopAndRemoveDelegate(cluster logicalcluster.Name) { + k.lock.Lock() + defer k.lock.Unlock() + + if stopCh, ok := k.delegateStopChs[cluster]; ok { + close(stopCh) + delete(k.delegateStopChs, cluster) + } + + delete(k.delegates, cluster) +} diff --git a/pkg/server/config.go b/pkg/server/config.go index 492b8b24f8c..a46f901b330 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -219,17 +219,6 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { } } - // Call to BuildPriorityAndFairness in kcp instead of kube - if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) && c.Options.GenericControlPlane.GenericServerRunOptions.EnablePriorityAndFairness { - c.GenericConfig.FlowControl = kubeapf.NewKubeApfDelegator( - 
c.KubeSharedInformerFactory, - c.KubeClusterClient, - 1600, - c.Options.GenericControlPlane.GenericServerRunOptions.RequestTimeout/4, - ) - - } - // Setup kcp * informers, but those will need the identities for the APIExports used to make the APIs available. // The identities are not known before we can get them from the APIExports via the loopback client or from the root shard in case this is a non-root shard, // hence we postpone this to getOrCreateKcpIdentities() in the kcp-start-informers post-start hook. @@ -277,6 +266,19 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { kcpinformers.WithExtraClusterScopedIndexers(indexers.ClusterScoped()), kcpinformers.WithExtraNamespaceScopedIndexers(indexers.NamespaceScoped()), ) + + // Call to BuildPriorityAndFairness in kcp instead of kube + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) && c.Options.GenericControlPlane.GenericServerRunOptions.EnablePriorityAndFairness { + c.GenericConfig.FlowControl = kubeapf.NewKubeApfDelegator( + c.KubeSharedInformerFactory, + c.KubeClusterClient, + c.KcpSharedInformerFactory.Tenancy().V1alpha1().ClusterWorkspaces(), + 1600, + c.Options.GenericControlPlane.GenericServerRunOptions.RequestTimeout/4, + ) + + } + c.DeepSARClient, err = kcpkubernetesclientset.NewForConfig(authorization.WithDeepSARConfig(rest.CopyConfig(c.GenericConfig.LoopbackClientConfig))) if err != nil { return nil, err From d832a36e8e979a7faad2433de82d7afef427105f Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Tue, 8 Nov 2022 20:35:21 +0000 Subject: [PATCH 24/28] fix bug: apisChanged monitor thread infinite loop --- pkg/reconciler/soct/controller.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 2d7eddec16d..9b55dc1ba1c 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go 
@@ -173,6 +173,8 @@ func (c *SOCTController) startClusterWorkspaceTracker(ctx context.Context, clust // Start a goroutine to subscribe to changes in API apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("soct-" + clusterNameStr) + logger := klog.FromContext(ctx) + logger = logger.WithValues("logicalCluster", clusterName.String()) go func() { var discoveryCancel func() @@ -180,13 +182,14 @@ func (c *SOCTController) startClusterWorkspaceTracker(ctx context.Context, clust for { select { case <-ctx.Done(): - if discoveryCancel != nil { - discoveryCancel() - } return - case <-apisChanged: + case _, more := <-apisChanged: + logger.V(2).Info("apisChanged TRIGGERED -- RESETTING Observers") if discoveryCancel != nil { discoveryCancel() + if !more { // FIXME: Use cluster specific context instead of relying on apisChanged channel? + return + } } apisChangedCtx, cancelFunc := context.WithCancel(ctx) discoveryCancel = cancelFunc From 3ced176b3a702c006ca118b375109a726537b463 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 10 Nov 2022 02:54:21 +0000 Subject: [PATCH 25/28] Update import of kcp client package --- pkg/reconciler/kubeapf/kube_apf.go | 4 ++-- pkg/reconciler/kubeapf/single_cluster_flow_control.go | 6 +++--- pkg/reconciler/soct/controller.go | 4 ++-- pkg/reconciler/util/scoped_generic_informer.go | 6 +++--- pkg/reconciler/util/util.go | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index afafc133922..b7c7648d2f6 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -7,6 +7,7 @@ import ( "time" tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/client" tenancyinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" "github.com/kcp-dev/kcp/pkg/logging" "github.com/kcp-dev/logicalcluster/v2" @@ 
-22,7 +23,6 @@ import ( kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" ) @@ -233,7 +233,7 @@ func (c *KubeApfDelegator) processNext( func (k *KubeApfDelegator) processClusterWorkspace(key string) error { // e.g. root:orgws - parent, name := clusters.SplitClusterAwareKey(key) + parent, name := client.SplitClusterAwareKey(key) // turn it into root:org:ws clusterName := parent.Join(name) diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go index 8308c9f8b2f..70fe6af81bf 100644 --- a/pkg/reconciler/kubeapf/single_cluster_flow_control.go +++ b/pkg/reconciler/kubeapf/single_cluster_flow_control.go @@ -1,6 +1,7 @@ package kubeapf import ( + "github.com/kcp-dev/kcp/pkg/client" "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/kcp/pkg/reconciler/util" "github.com/kcp-dev/logicalcluster/v2" @@ -13,7 +14,6 @@ import ( v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" flowcontrollisterv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" cache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" ) type scopedFlowcontrol struct { @@ -128,7 +128,7 @@ var _ flowcontrollisterv1beta2.FlowSchemaLister = &SingleClusterFlowSchemaLister // Get implements v1beta2.FlowSchemaLister func (s *SingleClusterFlowSchemaLister) Get(name string) (*apiflowcontrolv1beta2.FlowSchema, error) { - key := clusters.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way + key := client.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way obj, exists, err := s.indexer.GetByKey(key) if err != nil { return nil, err @@ -159,7 +159,7 @@ var _ flowcontrollisterv1beta2.PriorityLevelConfigurationLister = &SingleCluster // Get implements v1beta2.PriorityLevelConfigurationLister func (s 
*SingleClusterPriorityLevelConfigurationLister) Get(name string) (*apiflowcontrolv1beta2.PriorityLevelConfiguration, error) { - key := clusters.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way + key := client.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way obj, exists, err := s.indexer.GetByKey(key) if err != nil { return nil, err diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 9b55dc1ba1c..30f0f88c095 100644 --- a/pkg/reconciler/soct/controller.go +++ b/pkg/reconciler/soct/controller.go @@ -6,6 +6,7 @@ import ( "time" tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/client" tenancyinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" "github.com/kcp-dev/kcp/pkg/informer" "github.com/kcp-dev/kcp/pkg/logging" @@ -16,7 +17,6 @@ import ( flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" kubernetesclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" ) @@ -141,7 +141,7 @@ func (c *SOCTController) processNext( func (c *SOCTController) processClusterWorkspace(ctx context.Context, key string) error { logger := klog.FromContext(ctx) // e.g. 
root:orgws - parent, name := clusters.SplitClusterAwareKey(key) + parent, name := client.SplitClusterAwareKey(key) // turn it into root:org:ws clusterName := parent.Join(name) diff --git a/pkg/reconciler/util/scoped_generic_informer.go b/pkg/reconciler/util/scoped_generic_informer.go index 127cbb70d98..cb9d8c14ba9 100644 --- a/pkg/reconciler/util/scoped_generic_informer.go +++ b/pkg/reconciler/util/scoped_generic_informer.go @@ -1,6 +1,7 @@ package util import ( + "github.com/kcp-dev/kcp/pkg/client" "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/logicalcluster/v2" "k8s.io/apimachinery/pkg/api/errors" @@ -10,7 +11,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" ) // scopedGenericInformer wraps an informers.GenericInformer and produces instances of cache.GenericLister that are @@ -68,7 +68,7 @@ func (s *scopedGenericLister) ByNamespace(namespace string) cache.GenericNamespa // Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster. 
func (s *scopedGenericLister) Get(name string) (runtime.Object, error) { - key := clusters.ToClusterAwareKey(s.clusterName, name) + key := client.ToClusterAwareKey(s.clusterName, name) obj, exists, err := s.indexer.GetByKey(key) if err != nil { return nil, err @@ -127,7 +127,7 @@ func (s *scopedGenericNamespaceLister) List(selector labels.Selector) (ret []run indexValue = s.clusterName.String() } else { indexName = indexers.ByLogicalClusterAndNamespace - indexValue = clusters.ToClusterAwareKey(s.clusterName, s.namespace) + indexValue = client.ToClusterAwareKey(s.clusterName, s.namespace) } err = ListByIndex(s.indexer, indexName, indexValue, selector, func(obj interface{}) { ret = append(ret, obj.(runtime.Object)) diff --git a/pkg/reconciler/util/util.go b/pkg/reconciler/util/util.go index 46e831b54a2..de5d1e094c5 100644 --- a/pkg/reconciler/util/util.go +++ b/pkg/reconciler/util/util.go @@ -1,10 +1,10 @@ package util import ( + "github.com/kcp-dev/kcp/pkg/client" "github.com/kcp-dev/logicalcluster/v2" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clusters" ) func ClusterNameForObj(obj interface{}) logicalcluster.Name { @@ -20,6 +20,6 @@ func ClusterNameForObj(obj interface{}) logicalcluster.Name { return logicalcluster.Name{} } - cluster, _ := clusters.SplitClusterAwareKey(clusterAndName) + cluster, _ := client.SplitClusterAwareKey(clusterAndName) return cluster } From adf61897b6dd03a2f28d9b71e239a7136655ae6b Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 10 Nov 2022 16:37:49 +0000 Subject: [PATCH 26/28] renaming pkg kubeclusterclient --- pkg/reconciler/soct/controller.go | 6 ++++-- pkg/server/controllers.go | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/soct/controller.go b/pkg/reconciler/soct/controller.go index 30f0f88c095..77aaf455bff 100644 --- a/pkg/reconciler/soct/controller.go +++ 
b/pkg/reconciler/soct/controller.go @@ -15,7 +15,9 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" - kubernetesclient "k8s.io/client-go/kubernetes" + + // kubernetesclient "k8s.io/client-go/kubernetes" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -39,7 +41,7 @@ type SOCTController struct { // NewSOCTController func NewSOCTController( - kubeClusterClient *kubernetesclient.Cluster, + kubeClusterClient *kcpkubernetesclientset.ClusterClientset, // FIXME: unused? clusterWorkspacesInformer tenancyinformers.ClusterWorkspaceInformer, dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory, getterRegistry flowcontrolrequest.StorageObjectCountGetterRegistry, diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index 06830334995..1387f52c991 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -1191,7 +1191,7 @@ func (s *Server) installKcpSOCTController( config = rest.CopyConfig(config) config = rest.AddUserAgent(config, controllerName) - kubeClusterClient, err := kubernetesclient.NewClusterForConfig(config) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) if err != nil { return err } @@ -1212,7 +1212,7 @@ func (s *Server) installKcpSOCTController( return nil // don't klog.Fatal. This only happens when context is cancelled. 
} - go c.Run(util.GoContext(hookContext)) + go c.Run(goContext(hookContext)) return nil }); err != nil { From 261606c9a1f2b3cbbaa2ac666991f1bde9c861af Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Thu, 10 Nov 2022 20:24:43 +0000 Subject: [PATCH 27/28] Adapt the use of kcp kube shared informer factory --- pkg/reconciler/kubeapf/kube_apf.go | 19 +- .../kubeapf/scoped_shared_informer_factory.go | 194 ------------------ .../kubeapf/single_cluster_flow_control.go | 182 ---------------- 3 files changed, 11 insertions(+), 384 deletions(-) delete mode 100644 pkg/reconciler/kubeapf/scoped_shared_informer_factory.go delete mode 100644 pkg/reconciler/kubeapf/single_cluster_flow_control.go diff --git a/pkg/reconciler/kubeapf/kube_apf.go b/pkg/reconciler/kubeapf/kube_apf.go index b7c7648d2f6..5a8641e421e 100644 --- a/pkg/reconciler/kubeapf/kube_apf.go +++ b/pkg/reconciler/kubeapf/kube_apf.go @@ -6,6 +6,7 @@ import ( "sync" "time" + kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" "github.com/kcp-dev/kcp/pkg/client" tenancyinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" @@ -20,7 +21,6 @@ import ( utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" - kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -35,7 +35,7 @@ const KubeApfDelegatorName = "kcp-kube-apf-delegator" // requests from the handler chain to them type KubeApfDelegator struct { // scopingInformerFactory is not cluster scoped but can be made cluster scoped - scopingSharedInformerFactory *scopingSharedInformerFactory + scopingSharedInformerFactory kcpkubernetesinformers.SharedInformerFactory // kubeCluster ClusterInterface can be used to get 
cluster scoped clientset kubeCluster kubernetes.ClusterInterface @@ -68,15 +68,15 @@ type KubeApfDelegator struct { // NewKubeApfDelegator func NewKubeApfDelegator( - informerFactory kubeinformers.SharedInformerFactory, + informerFactory kcpkubernetesinformers.SharedInformerFactory, kubeCluster kubernetes.ClusterInterface, clusterWorkspacesInformer tenancyinformers.ClusterWorkspaceInformer, serverConcurrencyLimit int, requestWaitLimit time.Duration, ) *KubeApfDelegator { k := &KubeApfDelegator{ - scopingSharedInformerFactory: newScopingSharedInformerFactory(informerFactory), // not cluster scoped - kubeCluster: kubeCluster, // can be made cluster scoped + scopingSharedInformerFactory: informerFactory, // not cluster scoped + kubeCluster: kubeCluster, // can be made cluster scoped serverConcurrencyLimit: serverConcurrencyLimit, requestWaitLimit: requestWaitLimit, cwQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), @@ -167,15 +167,18 @@ func (k *KubeApfDelegator) getOrCreateDelegate(clusterName logicalcluster.Name) }() // New delegate uses cluster scoped informer factory and flowcontrol clients - scopedInformerFactory := k.scopingSharedInformerFactory.ForCluster(clusterName) + scopedFlowSchemaInformer := k.scopingSharedInformerFactory.Flowcontrol().V1beta2().FlowSchemas().Cluster(clusterName) + scopedPriorityLevelConfigurationInformer := k.scopingSharedInformerFactory.Flowcontrol().V1beta2().PriorityLevelConfigurations().Cluster(clusterName) + flowcontrolClient := k.kubeCluster.Cluster(clusterName).FlowcontrolV1beta2() delegate = utilflowcontrol.New( - scopedInformerFactory, + scopedFlowSchemaInformer, + scopedPriorityLevelConfigurationInformer, flowcontrolClient, k.serverConcurrencyLimit, k.requestWaitLimit, ) - scopedInformerFactory.Start(delegateStopCh) + // scopedInformerFactory.Start(delegateStopCh) k.delegates[clusterName] = delegate k.delegateStopChs[clusterName] = delegateStopCh // TODO: can Unlock here? 
diff --git a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go b/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go deleted file mode 100644 index 8bb02f0870b..00000000000 --- a/pkg/reconciler/kubeapf/scoped_shared_informer_factory.go +++ /dev/null @@ -1,194 +0,0 @@ -package kubeapf - -import ( - reflect "reflect" - - "github.com/kcp-dev/kcp/pkg/reconciler/util" - "github.com/kcp-dev/logicalcluster/v2" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - admissionregistration "k8s.io/client-go/informers/admissionregistration" - apiserverinternal "k8s.io/client-go/informers/apiserverinternal" - apps "k8s.io/client-go/informers/apps" - autoscaling "k8s.io/client-go/informers/autoscaling" - batch "k8s.io/client-go/informers/batch" - certificates "k8s.io/client-go/informers/certificates" - coordination "k8s.io/client-go/informers/coordination" - core "k8s.io/client-go/informers/core" - discovery "k8s.io/client-go/informers/discovery" - events "k8s.io/client-go/informers/events" - extensions "k8s.io/client-go/informers/extensions" - flowcontrol "k8s.io/client-go/informers/flowcontrol" - "k8s.io/client-go/informers/internalinterfaces" - networking "k8s.io/client-go/informers/networking" - node "k8s.io/client-go/informers/node" - policy "k8s.io/client-go/informers/policy" - rbac "k8s.io/client-go/informers/rbac" - scheduling "k8s.io/client-go/informers/scheduling" - storage "k8s.io/client-go/informers/storage" - cache "k8s.io/client-go/tools/cache" -) - -// scopingSharedInformerFactory is used to create scopedSharedInformerFactory that -// targets a specific logical cluster -type scopingSharedInformerFactory struct { - factory informers.SharedInformerFactory - delegatingEventHandler *util.DelegatingEventHandler -} - -func newScopingSharedInformerFactory(factory informers.SharedInformerFactory) *scopingSharedInformerFactory { - return &scopingSharedInformerFactory{ - factory: factory, - 
delegatingEventHandler: util.NewDelegatingEventHandler(), - } -} - -// ForCluster returns a scopedSharedInformerFactory that works on a specific logical cluster -func (f *scopingSharedInformerFactory) ForCluster(clusterName logicalcluster.Name) *scopedSharedInformerFactory { - return &scopedSharedInformerFactory{ - delegate: f.factory, - clusterName: clusterName, - delegatingEventHandler: f.delegatingEventHandler, - } -} - -// scopedSharedInformerFactory can create informers (currently limited to Flowcontrol) -// that are for a single cluster -type scopedSharedInformerFactory struct { - delegate informers.SharedInformerFactory - clusterName logicalcluster.Name - delegatingEventHandler *util.DelegatingEventHandler -} - -var _ informers.SharedInformerFactory = &scopedSharedInformerFactory{} - -// ExtraClusterScopedIndexers implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) ExtraClusterScopedIndexers() cache.Indexers { - panic("unimplemented") -} - -// ExtraNamespaceScopedIndexers implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) ExtraNamespaceScopedIndexers() cache.Indexers { - panic("unimplemented") -} - -// InformerFor implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - panic("unimplemented") -} - -// KeyFunction implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) KeyFunction() cache.KeyFunc { - panic("unimplemented") -} - -// Start implements informers.SharedInformerFactory -func (f *scopedSharedInformerFactory) Start(stopCh <-chan struct{}) { - f.delegate.Start(stopCh) -} - -// Admissionregistration implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Admissionregistration() admissionregistration.Interface { - panic("unimplemented") -} - -// Apps implements informers.SharedInformerFactory -func 
(*scopedSharedInformerFactory) Apps() apps.Interface { - panic("unimplemented") -} - -// Autoscaling implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Autoscaling() autoscaling.Interface { - panic("unimplemented") -} - -// Batch implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Batch() batch.Interface { - panic("unimplemented") -} - -// Certificates implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Certificates() certificates.Interface { - panic("unimplemented") -} - -// Coordination implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Coordination() coordination.Interface { - panic("unimplemented") -} - -// Core implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Core() core.Interface { - panic("unimplemented") -} - -// Discovery implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Discovery() discovery.Interface { - panic("unimplemented") -} - -// Events implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Events() events.Interface { - panic("unimplemented") -} - -// Extensions implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Extensions() extensions.Interface { - panic("unimplemented") -} - -// Flowcontrol returns an implementation of flowcontrol.Interface -// that is targeting a single cluster -func (f *scopedSharedInformerFactory) Flowcontrol() flowcontrol.Interface { - return &scopedFlowcontrol{ - clusterName: f.clusterName, - delegate: f.delegate.Flowcontrol(), - delegatingEventHandler: f.delegatingEventHandler, - } -} - -// ForResource implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { - panic("unimplemented") -} - -// Internal implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Internal() 
apiserverinternal.Interface { - panic("unimplemented") -} - -// Networking implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Networking() networking.Interface { - panic("unimplemented") -} - -// Node implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Node() node.Interface { - panic("unimplemented") -} - -// Policy implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Policy() policy.Interface { - panic("unimplemented") -} - -// Rbac implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Rbac() rbac.Interface { - panic("unimplemented") -} - -// Scheduling implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Scheduling() scheduling.Interface { - panic("unimplemented") -} - -// Storage implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) Storage() storage.Interface { - panic("unimplemented") -} - -// WaitForCacheSync implements informers.SharedInformerFactory -func (*scopedSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - panic("unimplemented") -} diff --git a/pkg/reconciler/kubeapf/single_cluster_flow_control.go b/pkg/reconciler/kubeapf/single_cluster_flow_control.go deleted file mode 100644 index 70fe6af81bf..00000000000 --- a/pkg/reconciler/kubeapf/single_cluster_flow_control.go +++ /dev/null @@ -1,182 +0,0 @@ -package kubeapf - -import ( - "github.com/kcp-dev/kcp/pkg/client" - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/reconciler/util" - "github.com/kcp-dev/logicalcluster/v2" - apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - flowcontrol "k8s.io/client-go/informers/flowcontrol" - v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" - v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" - v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" - 
flowcontrollisterv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" - cache "k8s.io/client-go/tools/cache" -) - -type scopedFlowcontrol struct { - clusterName logicalcluster.Name - delegate flowcontrol.Interface - delegatingEventHandler *util.DelegatingEventHandler -} - -var _ flowcontrol.Interface = &scopedFlowcontrol{} - -// V1alpha1 implements flowcontrol.Interface -func (*scopedFlowcontrol) V1alpha1() v1alpha1.Interface { - panic("unimplemented") -} - -// V1beta1 implements flowcontrol.Interface -func (*scopedFlowcontrol) V1beta1() v1beta1.Interface { - panic("unimplemented") -} - -// V1beta2 implements flowcontrol.Interface -func (f *scopedFlowcontrol) V1beta2() v1beta2.Interface { - return &scopedFlowcontrolV1Beta2{ - clusterName: f.clusterName, - delegate: f.delegate.V1beta2(), - delegatingEventHandler: f.delegatingEventHandler, - } -} - -type scopedFlowcontrolV1Beta2 struct { - clusterName logicalcluster.Name - delegate v1beta2.Interface - delegatingEventHandler *util.DelegatingEventHandler -} - -var _ v1beta2.Interface = &scopedFlowcontrolV1Beta2{} - -// FlowSchemas implements v1beta2.Interface -func (f *scopedFlowcontrolV1Beta2) FlowSchemas() v1beta2.FlowSchemaInformer { - return &scopedFlowSchemaInformer{ - clusterName: f.clusterName, - delegate: f.delegate.FlowSchemas(), - delegatingEventHandler: f.delegatingEventHandler, - } -} - -// PriorityLevelConfigurations implements v1beta2.Interface -func (f *scopedFlowcontrolV1Beta2) PriorityLevelConfigurations() v1beta2.PriorityLevelConfigurationInformer { - return &scopedPriorityLevelConfigurationInformer{ - clusterName: f.clusterName, - delegate: f.delegate.PriorityLevelConfigurations(), - delegatingEventHandler: f.delegatingEventHandler, - } -} - -type scopedFlowSchemaInformer struct { - clusterName logicalcluster.Name - delegate v1beta2.FlowSchemaInformer - delegatingEventHandler *util.DelegatingEventHandler -} -type scopedPriorityLevelConfigurationInformer struct { - clusterName logicalcluster.Name - 
delegate v1beta2.PriorityLevelConfigurationInformer - delegatingEventHandler *util.DelegatingEventHandler -} - -var _ v1beta2.FlowSchemaInformer = &scopedFlowSchemaInformer{} -var _ v1beta2.PriorityLevelConfigurationInformer = &scopedPriorityLevelConfigurationInformer{} - -// Informer implements v1beta2.FlowSchemaInformer -func (s *scopedFlowSchemaInformer) Informer() cache.SharedIndexInformer { - return &util.DelegatingInformer{ - ClusterName: s.clusterName, - Resource: apiflowcontrolv1beta2.Resource("flowschemas"), - SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. single instance will be shared - DelegatingEventHandler: s.delegatingEventHandler, - } -} - -// Lister implements v1beta2.FlowSchemaInformer -func (s *scopedFlowSchemaInformer) Lister() flowcontrollisterv1beta2.FlowSchemaLister { - return &SingleClusterFlowSchemaLister{ - indexer: s.delegate.Informer().GetIndexer(), - clusterName: s.clusterName, - } -} - -// Informer implements v1beta2.PriorityLevelConfigurationInformer -func (s *scopedPriorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return &util.DelegatingInformer{ - ClusterName: s.clusterName, - Resource: apiflowcontrolv1beta2.Resource("prioritylevelconfigurations"), - SharedIndexInformer: s.delegate.Informer(), // this informer is checked through sharedInformerFactory logic, i.e. 
single instance will be shared - DelegatingEventHandler: s.delegatingEventHandler, - } -} - -// Lister implements v1beta2.PriorityLevelConfigurationInformer -func (s *scopedPriorityLevelConfigurationInformer) Lister() flowcontrollisterv1beta2.PriorityLevelConfigurationLister { - return &SingleClusterPriorityLevelConfigurationLister{ - indexer: s.delegate.Informer().GetIndexer(), - clusterName: s.clusterName, - } -} - -type SingleClusterFlowSchemaLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name -} - -var _ flowcontrollisterv1beta2.FlowSchemaLister = &SingleClusterFlowSchemaLister{} - -// Get implements v1beta2.FlowSchemaLister -func (s *SingleClusterFlowSchemaLister) Get(name string) (*apiflowcontrolv1beta2.FlowSchema, error) { - key := client.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(apiflowcontrolv1beta2.Resource("flowschema"), name) - } - return obj.(*apiflowcontrolv1beta2.FlowSchema), nil -} - -// List implements v1beta2.FlowSchemaLister -func (s *SingleClusterFlowSchemaLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.FlowSchema, err error) { - if err := util.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { - ret = append(ret, obj.(*apiflowcontrolv1beta2.FlowSchema)) - }); err != nil { - return nil, err - } - - return ret, nil -} - -type SingleClusterPriorityLevelConfigurationLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name -} - -var _ flowcontrollisterv1beta2.PriorityLevelConfigurationLister = &SingleClusterPriorityLevelConfigurationLister{} - -// Get implements v1beta2.PriorityLevelConfigurationLister -func (s *SingleClusterPriorityLevelConfigurationLister) Get(name string) (*apiflowcontrolv1beta2.PriorityLevelConfiguration, error) { - key := 
client.ToClusterAwareKey(s.clusterName, name) // FIXME: not sure if this is the right way - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(apiflowcontrolv1beta2.Resource("prioritylevelconfiguration"), name) - } - return obj.(*apiflowcontrolv1beta2.PriorityLevelConfiguration), nil -} - -// List implements v1beta2.PriorityLevelConfigurationLister -func (s *SingleClusterPriorityLevelConfigurationLister) List(selector labels.Selector) (ret []*apiflowcontrolv1beta2.PriorityLevelConfiguration, err error) { - if err := util.ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { - ret = append(ret, obj.(*apiflowcontrolv1beta2.PriorityLevelConfiguration)) - }); err != nil { - return nil, err - } - - return ret, nil -} From aa607475f8b163f237c7befb5093dc5f9a985397 Mon Sep 17 00:00:00 2001 From: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Date: Mon, 14 Nov 2022 18:24:10 +0000 Subject: [PATCH 28/28] remove scoped informer code that's no longer needed --- .../util/delegating_event_handler.go | 123 --------------- pkg/reconciler/util/delegating_informer.go | 28 ---- .../util/scoped_generic_informer.go | 149 ------------------ pkg/reconciler/util/util.go | 25 --- 4 files changed, 325 deletions(-) delete mode 100644 pkg/reconciler/util/delegating_event_handler.go delete mode 100644 pkg/reconciler/util/delegating_informer.go delete mode 100644 pkg/reconciler/util/scoped_generic_informer.go delete mode 100644 pkg/reconciler/util/util.go diff --git a/pkg/reconciler/util/delegating_event_handler.go b/pkg/reconciler/util/delegating_event_handler.go deleted file mode 100644 index 51b356440f0..00000000000 --- a/pkg/reconciler/util/delegating_event_handler.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" - "time" - - "github.com/kcp-dev/logicalcluster/v2" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -// DelegatingEventHandler multiplexes event handlers for multiple resource types and logical clusters. -type DelegatingEventHandler struct { - lock sync.RWMutex - eventHandlers map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler -} - -// NewDelegatingEventHandler returns a new delegatingEventHandler. -func NewDelegatingEventHandler() *DelegatingEventHandler { - return &DelegatingEventHandler{ - eventHandlers: map[schema.GroupResource]map[logicalcluster.Name]cache.ResourceEventHandler{}, - } -} - -// registerEventHandler registers an event handler, h, to receive events for the given resource/informer scoped only to -// clusterName. 
-func (d *DelegatingEventHandler) registerEventHandler( - resource schema.GroupResource, - informer cache.SharedIndexInformer, - clusterName logicalcluster.Name, - h cache.ResourceEventHandler, -) { - d.lock.Lock() - defer d.lock.Unlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} - d.eventHandlers[resource] = groupResourceHandlers - - informer.AddEventHandler(d.resourceEventHandlerFuncs(resource)) - } - - groupResourceHandlers[clusterName] = h -} - -// registerEventHandlerWithResyncPeriod registers an event handler, h, to receive events for the given resource/informer -// scoped only to clusterName, with the given resync period. -func (d *DelegatingEventHandler) registerEventHandlerWithResyncPeriod( - resource schema.GroupResource, - informer cache.SharedIndexInformer, - clusterName logicalcluster.Name, - h cache.ResourceEventHandler, - resyncPeriod time.Duration, -) { - d.lock.Lock() - defer d.lock.Unlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - groupResourceHandlers = map[logicalcluster.Name]cache.ResourceEventHandler{} - d.eventHandlers[resource] = groupResourceHandlers - - informer.AddEventHandlerWithResyncPeriod(d.resourceEventHandlerFuncs(resource), resyncPeriod) - } - - groupResourceHandlers[clusterName] = h -} - -func (d *DelegatingEventHandler) resourceEventHandlerFuncs(resource schema.GroupResource) cache.ResourceEventHandlerFuncs { - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - if h := d.getEventHandler(resource, obj); h != nil { - h.OnAdd(obj) - } - }, - UpdateFunc: func(oldObj, newObj interface{}) { - if h := d.getEventHandler(resource, oldObj); h != nil { - h.OnUpdate(oldObj, newObj) - } - }, - DeleteFunc: func(obj interface{}) { - if h := d.getEventHandler(resource, obj); h != nil { - h.OnDelete(obj) - } - }, - } -} - -// getEventHandler returns a cache.ResourceEventHandler for 
resource and the logicalcluster.Name for obj. -func (d *DelegatingEventHandler) getEventHandler(resource schema.GroupResource, obj interface{}) cache.ResourceEventHandler { - clusterName := ClusterNameForObj(obj) - if clusterName.Empty() { - return nil - } - - d.lock.RLock() - defer d.lock.RUnlock() - - groupResourceHandlers, ok := d.eventHandlers[resource] - if !ok { - return nil - } - - return groupResourceHandlers[clusterName] -} diff --git a/pkg/reconciler/util/delegating_informer.go b/pkg/reconciler/util/delegating_informer.go deleted file mode 100644 index 84dae2d5b15..00000000000 --- a/pkg/reconciler/util/delegating_informer.go +++ /dev/null @@ -1,28 +0,0 @@ -package util - -import ( - "time" - - "github.com/kcp-dev/logicalcluster/v2" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -// DelegatingInformer embeds a cache.SharedIndexInformer, delegating adding event handlers to -// registerEventHandlerForCluster. -type DelegatingInformer struct { - ClusterName logicalcluster.Name - Resource schema.GroupResource - cache.SharedIndexInformer - DelegatingEventHandler *DelegatingEventHandler -} - -// AddEventHandler registers with the delegating event handler. -func (d *DelegatingInformer) AddEventHandler(handler cache.ResourceEventHandler) { - d.DelegatingEventHandler.registerEventHandler(d.Resource, d.SharedIndexInformer, d.ClusterName, handler) -} - -// AddEventHandlerWithResyncPeriod registers with the delegating event handler with the given resync period. 
-func (d *DelegatingInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { - d.DelegatingEventHandler.registerEventHandlerWithResyncPeriod(d.Resource, d.SharedIndexInformer, d.ClusterName, handler, resyncPeriod) -} diff --git a/pkg/reconciler/util/scoped_generic_informer.go b/pkg/reconciler/util/scoped_generic_informer.go deleted file mode 100644 index cb9d8c14ba9..00000000000 --- a/pkg/reconciler/util/scoped_generic_informer.go +++ /dev/null @@ -1,149 +0,0 @@ -package util - -import ( - "github.com/kcp-dev/kcp/pkg/client" - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/logicalcluster/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" -) - -// scopedGenericInformer wraps an informers.GenericInformer and produces instances of cache.GenericLister that are -// scoped to a single logical cluster. -type scopedGenericInformer struct { - delegate informers.GenericInformer - clusterName logicalcluster.Name - resource schema.GroupResource - delegatingEventHandler *DelegatingEventHandler -} - -// Informer invokes Informer() on the underlying informers.GenericInformer. -func (s *scopedGenericInformer) Informer() cache.SharedIndexInformer { - return &DelegatingInformer{ - ClusterName: s.clusterName, - SharedIndexInformer: s.delegate.Informer(), - DelegatingEventHandler: s.delegatingEventHandler, - } -} - -// Lister returns an implementation of cache.GenericLister that is scoped to a single logical cluster. 
-func (s *scopedGenericInformer) Lister() cache.GenericLister { - return &scopedGenericLister{ - indexer: s.delegate.Informer().GetIndexer(), - clusterName: s.clusterName, - resource: s.resource, - } -} - -// scopedGenericLister wraps a cache.Indexer to implement a cache.GenericLister that is scoped to a single logical -// cluster. -type scopedGenericLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name - resource schema.GroupResource -} - -// List returns all instances from the cache.Indexer scoped to a single logical cluster and matching selector. -func (s *scopedGenericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - err = ListByIndex(s.indexer, indexers.ByLogicalCluster, s.clusterName.String(), selector, func(obj interface{}) { - ret = append(ret, obj.(runtime.Object)) - }) - return ret, err -} - -// ByNamespace returns an implementation of cache.GenericNamespaceLister that is scoped to a single logical cluster. -func (s *scopedGenericLister) ByNamespace(namespace string) cache.GenericNamespaceLister { - return &scopedGenericNamespaceLister{ - indexer: s.indexer, - clusterName: s.clusterName, - namespace: namespace, - resource: s.resource, - } -} - -// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster. 
-func (s *scopedGenericLister) Get(name string) (runtime.Object, error) { - key := client.ToClusterAwareKey(s.clusterName, name) - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} - -func ListByIndex(indexer cache.Indexer, indexName, indexValue string, selector labels.Selector, appendFunc func(obj interface{})) error { - selectAll := selector == nil || selector.Empty() - - list, err := indexer.ByIndex(indexName, indexValue) - if err != nil { - return err - } - - for i := range list { - if selectAll { - appendFunc(list[i]) - continue - } - - metadata, err := meta.Accessor(list[i]) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFunc(list[i]) - } - } - - return nil -} - -// scopedGenericNamespaceLister wraps a cache.Indexer to implement a cache.GenericNamespaceLister that is scoped to a -// single logical cluster. -type scopedGenericNamespaceLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name - namespace string - resource schema.GroupResource -} - -// List lists all instances from the cache.Indexer scoped to a single logical cluster and namespace, and matching -// selector. -func (s *scopedGenericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - // To support e.g. quota for cluster-scoped resources, we've hacked the k8s quota to use namespace="" when - // checking quota for cluster-scoped resources. But because all the upstream quota code is written to only - // support namespace-scoped resources, we have to hack the "namespace lister" to support returning all items - // when its namespace is "". 
- var indexName, indexValue string - if s.namespace == "" { - indexName = indexers.ByLogicalCluster - indexValue = s.clusterName.String() - } else { - indexName = indexers.ByLogicalClusterAndNamespace - indexValue = client.ToClusterAwareKey(s.clusterName, s.namespace) - } - err = ListByIndex(s.indexer, indexName, indexValue, selector, func(obj interface{}) { - ret = append(ret, obj.(runtime.Object)) - }) - return ret, err -} - -// Get returns the runtime.Object from the cache.Indexer identified by name, from the appropriate logical cluster and -// namespace. -func (s *scopedGenericNamespaceLister) Get(name string) (runtime.Object, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} diff --git a/pkg/reconciler/util/util.go b/pkg/reconciler/util/util.go deleted file mode 100644 index de5d1e094c5..00000000000 --- a/pkg/reconciler/util/util.go +++ /dev/null @@ -1,25 +0,0 @@ -package util - -import ( - "github.com/kcp-dev/kcp/pkg/client" - "github.com/kcp-dev/logicalcluster/v2" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/cache" -) - -func ClusterNameForObj(obj interface{}) logicalcluster.Name { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return logicalcluster.Name{} - } - - _, clusterAndName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - utilruntime.HandleError(err) - return logicalcluster.Name{} - } - - cluster, _ := client.SplitClusterAwareKey(clusterAndName) - return cluster -}