From 61b60e7836445324cca61cd64d8d4b4bfa90eb7b Mon Sep 17 00:00:00 2001 From: ialexeze Date: Sun, 10 May 2026 00:59:46 +0000 Subject: [PATCH 1/2] chaos testing --- .../01-based-on-own-metrics/cleanup.sh | 0 .../01-based-on-own-metrics/katalog.yaml | 1 + .../01-based-on-own-metrics/load.sh | 11 + .../beginner/01-hello-website/katalog.yaml | 1 + pkg/certmanager/generate.go | 2 +- pkg/katalog/validate.go | 18 +- pkg/orkestra-registry/common/parse.go | 27 +- pkg/orkestra-registry/configmaps/configmap.go | 14 + pkg/orkestra-registry/configmaps/types.go | 23 +- pkg/orkestra-registry/cronjobs/cronjob.go | 14 + .../deployments/deployment.go | 9 + pkg/orkestra-registry/deployments/types.go | 9 +- pkg/orkestra-registry/hpas/hpa.go | 9 + pkg/orkestra-registry/hpas/types.go | 5 + pkg/orkestra-registry/ingresses/ingress.go | 9 + pkg/orkestra-registry/ingresses/types.go | 5 + pkg/orkestra-registry/jobs/job.go | 11 + pkg/orkestra-registry/namespaces/namespace.go | 13 + pkg/orkestra-registry/pdbs/pdb.go | 9 + pkg/orkestra-registry/pdbs/types.go | 5 + pkg/orkestra-registry/pods/pod.go | 9 + pkg/orkestra-registry/pods/types.go | 9 +- pkg/orkestra-registry/pvcs/pvc.go | 41 +- pkg/orkestra-registry/pvcs/types.go | 5 + pkg/orkestra-registry/pvs/pv.go | 8 + pkg/orkestra-registry/pvs/types.go | 5 + .../replicasets/replicaset.go | 9 + pkg/orkestra-registry/replicasets/types.go | 9 +- .../rolebindings/rolebinding.go | 14 + pkg/orkestra-registry/roles/role.go | 14 + pkg/orkestra-registry/secrets/secret.go | 18 +- pkg/orkestra-registry/secrets/types.go | 25 +- .../serviceaccounts/serviceaccount.go | 11 + pkg/orkestra-registry/services/services.go | 9 + pkg/orkestra-registry/services/types.go | 5 + .../statefulsets/statefulset.go | 43 +- pkg/orkestra-registry/statefulsets/types.go | 5 + pkg/orkestra-registry/template/resolver.go | 57 ++ pkg/types/docker.go | 5 + pkg/types/external.go | 5 + pkg/types/git.go | 5 + pkg/types/hook_methods.go | 532 ++++++++++++++++++ pkg/types/hook_temp.go | 139 
+++++ pkg/types/hooks_sleep.go | 211 +++++++ pkg/types/methods.go | 171 ------ pkg/types/secret_rotation.go | 8 +- pkg/types/secret_rotation_test.go | 56 +- pkg/types/types.go | 283 ++++++---- .../content/docs/concepts/secret-rotation.md | 2 +- .../docs/concepts/secret-rotation/index.html | 2 +- 50 files changed, 1544 insertions(+), 366 deletions(-) mode change 100644 => 100755 examples/advanced/12-autoscale/01-based-on-own-metrics/cleanup.sh create mode 100755 examples/advanced/12-autoscale/01-based-on-own-metrics/load.sh create mode 100644 pkg/types/hook_methods.go create mode 100644 pkg/types/hook_temp.go create mode 100644 pkg/types/hooks_sleep.go diff --git a/examples/advanced/12-autoscale/01-based-on-own-metrics/cleanup.sh b/examples/advanced/12-autoscale/01-based-on-own-metrics/cleanup.sh old mode 100644 new mode 100755 diff --git a/examples/advanced/12-autoscale/01-based-on-own-metrics/katalog.yaml b/examples/advanced/12-autoscale/01-based-on-own-metrics/katalog.yaml index e399ee73..4dfa07a1 100644 --- a/examples/advanced/12-autoscale/01-based-on-own-metrics/katalog.yaml +++ b/examples/advanced/12-autoscale/01-based-on-own-metrics/katalog.yaml @@ -46,3 +46,4 @@ spec: reconcile: true services: - port: "8080" + reconcile: true diff --git a/examples/advanced/12-autoscale/01-based-on-own-metrics/load.sh b/examples/advanced/12-autoscale/01-based-on-own-metrics/load.sh new file mode 100755 index 00000000..de8e1f3c --- /dev/null +++ b/examples/advanced/12-autoscale/01-based-on-own-metrics/load.sh @@ -0,0 +1,11 @@ +for i in $(seq 1 200); do + kubectl apply -f - <-deployment # namespace defaults to: # replicas defaults to: 1 diff --git a/pkg/certmanager/generate.go b/pkg/certmanager/generate.go index e2119a7c..843468e2 100644 --- a/pkg/certmanager/generate.go +++ b/pkg/certmanager/generate.go @@ -35,7 +35,7 @@ type TLSBundle struct { func GenerateTLSBundle(commonName string, dnsNames []string, validFor string) (*TLSBundle, error) { validity := 365 * 24 * time.Hour 
// default: 1 year if validFor != "" { - if d, err := orktypes.ParseRotationDuration(validFor); err == nil { + if d, err := orktypes.ParseTimeDuration(validFor); err == nil { validity = d } } diff --git a/pkg/katalog/validate.go b/pkg/katalog/validate.go index a633d043..3b76cff2 100644 --- a/pkg/katalog/validate.go +++ b/pkg/katalog/validate.go @@ -448,7 +448,7 @@ func (k *Katalog) validateNamespaceProtection() error { // across all enabled CRDs. It is fail-fast: the first invalid duration // returns an error immediately. // -// Supported units (extended by ParseRotationDuration): +// Supported units (extended by ParseTimeDuration): // // d = days (24h) // w = weeks (7d) @@ -460,16 +460,24 @@ func (k *Katalog) validateTimeDuration() error { continue } + // Validate all declared sleep durations (discovered by pkg/types) + for _, e := range crd.CollectSleepEntries() { + if _, err := orktypes.ParseTimeDuration(e.Duration); err != nil { + return durationError(name, e.ResourceName, "sleep", e.Duration, err) + } + } + + // Validate secret durations (rotateAfter, TLS.validFor) if crd.HasOnCreate() { for _, s := range crd.OperatorBox.OnCreate.Secrets { if s.RotateAfter != "" { - if _, err := orktypes.ParseRotationDuration(s.RotateAfter); err != nil { + if _, err := orktypes.ParseTimeDuration(s.RotateAfter); err != nil { return durationError(name, s.Name, "rotateAfter", s.RotateAfter, err) } } // Check per-secret TLS presence if s.TLS != nil && s.TLS.ValidFor != "" { - if _, err := orktypes.ParseRotationDuration(s.TLS.ValidFor); err != nil { + if _, err := orktypes.ParseTimeDuration(s.TLS.ValidFor); err != nil { return durationError(name, s.Name, "validFor", s.TLS.ValidFor, err) } } @@ -479,13 +487,13 @@ func (k *Katalog) validateTimeDuration() error { if crd.HasOnReconcile() { for _, s := range crd.OperatorBox.OnReconcile.Secrets { if s.RotateAfter != "" { - if _, err := orktypes.ParseRotationDuration(s.RotateAfter); err != nil { + if _, err := 
orktypes.ParseTimeDuration(s.RotateAfter); err != nil { return durationError(name, s.Name, "rotateAfter", s.RotateAfter, err) } } // Check per-secret TLS presence if s.TLS != nil && s.TLS.ValidFor != "" { - if _, err := orktypes.ParseRotationDuration(s.TLS.ValidFor); err != nil { + if _, err := orktypes.ParseTimeDuration(s.TLS.ValidFor); err != nil { return durationError(name, s.Name, "validFor", s.TLS.ValidFor, err) } } diff --git a/pkg/orkestra-registry/common/parse.go b/pkg/orkestra-registry/common/parse.go index 66e425fa..ced1ca5a 100644 --- a/pkg/orkestra-registry/common/parse.go +++ b/pkg/orkestra-registry/common/parse.go @@ -1,6 +1,11 @@ package common -import "fmt" +import ( + "fmt" + "time" + + orktypes "github.com/orkspace/orkestra/pkg/types" +) // ParseBool interprets common boolean representations from template expressions. func ParseBool(s string) bool { @@ -18,3 +23,23 @@ func ParsePort(s string) int { fmt.Sscanf(s, "%d", &p) return p } + +// SleepIfNeeded parses an extended duration string and sleeps if non-zero. +// Used by all operatorBox resources to inject artificial latency for +// autoscaling tests, chaos engineering, and latency simulation. +func SleepIfNeeded(s string) error { + if s == "" { + return nil + } + + d, err := orktypes.ParseTimeDuration(s) + if err != nil { + return err + } + + if d > 0 { + time.Sleep(d) + } + + return nil +} diff --git a/pkg/orkestra-registry/configmaps/configmap.go b/pkg/orkestra-registry/configmaps/configmap.go index 3c9008c4..542a2f5d 100644 --- a/pkg/orkestra-registry/configmaps/configmap.go +++ b/pkg/orkestra-registry/configmaps/configmap.go @@ -40,6 +40,11 @@ type ResolvedConfigMapSpec struct { // Labels — applied to ConfigMap metadata. Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string } // Create creates a ConfigMap if it does not already exist. @@ -51,6 +56,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().ConfigMaps(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -94,6 +102,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().CoreV1().ConfigMaps(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -139,6 +150,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the ConfigMap if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedConfigMapSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().ConfigMaps(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/configmaps/types.go b/pkg/orkestra-registry/configmaps/types.go index a15b40a7..945064c4 100644 --- a/pkg/orkestra-registry/configmaps/types.go +++ b/pkg/orkestra-registry/configmaps/types.go @@ -47,36 +47,41 @@ import orktypes "github.com/orkspace/orkestra/pkg/types" // - production type ConfigMapTemplateSource struct { // Version — OrkestraRegistry implementation version. Omit for latest. - Version string `yaml:"version" validate:"omitempty"` + Version string // Name — ConfigMap name. 
// Default: "{{ .metadata.name }}-config" - Name string `yaml:"name" validate:"omitempty"` + Name string // Namespace — primary target namespace. // Default: "{{ .metadata.namespace }}" - Namespace string `yaml:"namespace" validate:"omitempty"` + Namespace string // ToNamespaces — create one copy in each listed namespace. // Each element supports template expressions. - ToNamespaces []string `yaml:"toNamespaces" validate:"omitempty"` + ToNamespaces []string // FromConfigMap — name of an existing ConfigMap to copy data from. // Orkestra reads this at reconcile time — copies stay in sync with the source. - FromConfigMap string `yaml:"fromConfigMap" validate:"omitempty"` + FromConfigMap string // FromNamespace — namespace where FromConfigMap lives. // Default: same namespace as the CR. - FromNamespace string `yaml:"fromNamespace" validate:"omitempty"` + FromNamespace string // Data — static key-value entries. // When FromConfigMap is also set, these entries override matching keys from the source. - Data map[string]string `yaml:"data" validate:"omitempty"` + Data map[string]string // Labels — applied to all created ConfigMap copies. - Labels []orktypes.ResourceLabel `yaml:"labels" validate:"omitempty"` + Labels []orktypes.ResourceLabel // Reconcile: true — sync on every reconcile. // When true, if the source ConfigMap changes, all copies are updated automatically. - Reconcile bool `yaml:"reconcile" validate:"omitempty"` + Reconcile bool + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string } diff --git a/pkg/orkestra-registry/cronjobs/cronjob.go b/pkg/orkestra-registry/cronjobs/cronjob.go index e9a3d897..716bec98 100644 --- a/pkg/orkestra-registry/cronjobs/cronjob.go +++ b/pkg/orkestra-registry/cronjobs/cronjob.go @@ -69,6 +69,11 @@ type ResolvedCronJobSpec struct { // for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. ImagePullSecrets []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a CronJob if it does not already exist. @@ -80,6 +85,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().BatchV1().CronJobs(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -118,6 +126,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().BatchV1().CronJobs(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -220,6 +231,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the CronJob if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedCronJobSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().BatchV1().CronJobs(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/deployments/deployment.go b/pkg/orkestra-registry/deployments/deployment.go index 98cbd13c..21576676 100644 --- a/pkg/orkestra-registry/deployments/deployment.go +++ b/pkg/orkestra-registry/deployments/deployment.go @@ -28,6 +28,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().AppsV1().Deployments(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -66,6 +69,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().AppsV1().Deployments(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -128,6 +134,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // for explicit cleanup declared in onDelete templates. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedDeploymentSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().AppsV1().Deployments(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/deployments/types.go b/pkg/orkestra-registry/deployments/types.go index dd1c950d..d366a616 100644 --- a/pkg/orkestra-registry/deployments/types.go +++ b/pkg/orkestra-registry/deployments/types.go @@ -41,15 +41,20 @@ type ResolvedDeploymentSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional // +mapType=atomic - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + NodeSelector map[string]string // ServiceAccountName is the name of the ServiceAccount to use to run this pod. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` + ServiceAccountName string // ImagePullSecrets is an optional list of references to secrets in the same namespace to use // for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. ImagePullSecrets []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string } diff --git a/pkg/orkestra-registry/hpas/hpa.go b/pkg/orkestra-registry/hpas/hpa.go index 09a0bb60..2e4cc55c 100644 --- a/pkg/orkestra-registry/hpas/hpa.go +++ b/pkg/orkestra-registry/hpas/hpa.go @@ -28,6 +28,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -66,6 +69,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -116,6 +122,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the HPA if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedHPASpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().AutoscalingV2().HorizontalPodAutoscalers(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/hpas/types.go b/pkg/orkestra-registry/hpas/types.go index 47eb92e9..854c46be 100644 --- a/pkg/orkestra-registry/hpas/types.go +++ b/pkg/orkestra-registry/hpas/types.go @@ -32,4 +32,9 @@ type ResolvedHPASpec struct { // Labels applied to HPA metadata. // Orkestra always adds: managed-by=orkestra, orkestra-owner= Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. 
+ // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/ingresses/ingress.go b/pkg/orkestra-registry/ingresses/ingress.go index e592a26d..3b6096f1 100644 --- a/pkg/orkestra-registry/ingresses/ingress.go +++ b/pkg/orkestra-registry/ingresses/ingress.go @@ -27,6 +27,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().NetworkingV1().Ingresses(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -65,6 +68,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().NetworkingV1().Ingresses(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -133,6 +139,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the Ingress if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedIngressSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().NetworkingV1().Ingresses(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/ingresses/types.go b/pkg/orkestra-registry/ingresses/types.go index c2cba1db..ed2522c0 100644 --- a/pkg/orkestra-registry/ingresses/types.go +++ b/pkg/orkestra-registry/ingresses/types.go @@ -38,6 +38,11 @@ type ResolvedIngressSpec struct { // TLS — optional TLS configuration. nil means no TLS. 
TLS *ResolvedIngressTLS + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // ResolvedIngressTLS holds the fully resolved TLS configuration for an Ingress. diff --git a/pkg/orkestra-registry/jobs/job.go b/pkg/orkestra-registry/jobs/job.go index 100357a1..3affaae7 100644 --- a/pkg/orkestra-registry/jobs/job.go +++ b/pkg/orkestra-registry/jobs/job.go @@ -46,6 +46,11 @@ type ResolvedJobSpec struct { // for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. ImagePullSecrets []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a Job if it does not already exist. @@ -62,6 +67,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().BatchV1().Jobs(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -94,6 +102,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the Job if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedJobSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } propagation := metav1.DeletePropagationForeground err := kube.Clientset().BatchV1().Jobs(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{ diff --git a/pkg/orkestra-registry/namespaces/namespace.go b/pkg/orkestra-registry/namespaces/namespace.go index 585e5ae1..b10d5416 100644 --- a/pkg/orkestra-registry/namespaces/namespace.go +++ b/pkg/orkestra-registry/namespaces/namespace.go @@ -9,6 +9,7 @@ import ( "github.com/orkspace/orkestra/pkg/kubeclient" "github.com/orkspace/orkestra/pkg/labels" "github.com/orkspace/orkestra/pkg/logger" + "github.com/orkspace/orkestra/pkg/orkestra-registry/common" orktypes "github.com/orkspace/orkestra/pkg/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -26,6 +27,11 @@ type ResolvedNamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // optional Finalizers []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a Namespace if it does not already exist. 
@@ -40,6 +46,10 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec return fmt.Errorf("namespace.Create: invalid spec: %w", err) } + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } + _, err := kube.Clientset().CoreV1().Namespaces().Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("namespace.Create: checking existence of %q: %w", spec.Name, err) @@ -70,6 +80,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // For most cases owner references handle cleanup automatically — // only use this when explicit cleanup control is needed. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedNamespaceSpec) error { + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().Namespaces().Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/pdbs/pdb.go b/pkg/orkestra-registry/pdbs/pdb.go index c8647c18..219eb401 100644 --- a/pkg/orkestra-registry/pdbs/pdb.go +++ b/pkg/orkestra-registry/pdbs/pdb.go @@ -29,6 +29,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().PolicyV1().PodDisruptionBudgets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -67,6 +70,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().PolicyV1().PodDisruptionBudgets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -116,6 +122,9 @@ func Update(ctx context.Context, kube 
*kubeclient.Kubeclient, owner domain.Objec // Delete deletes the PDB if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPDBSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().PolicyV1().PodDisruptionBudgets(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/pdbs/types.go b/pkg/orkestra-registry/pdbs/types.go index 1b2453a9..b3bc253d 100644 --- a/pkg/orkestra-registry/pdbs/types.go +++ b/pkg/orkestra-registry/pdbs/types.go @@ -27,4 +27,9 @@ type ResolvedPDBSpec struct { // Labels applied to PDB metadata. // Orkestra always adds: managed-by=orkestra, orkestra-owner= Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string } diff --git a/pkg/orkestra-registry/pods/pod.go b/pkg/orkestra-registry/pods/pod.go index eb5253be..c0c5be7d 100644 --- a/pkg/orkestra-registry/pods/pod.go +++ b/pkg/orkestra-registry/pods/pod.go @@ -26,6 +26,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().Pods(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -64,6 +67,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().CoreV1().Pods(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -103,6 +109,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // only use this when you need explicit cleanup control in onDelete. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPodSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().Pods(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/pods/types.go b/pkg/orkestra-registry/pods/types.go index 8e7ca10c..b1528c6b 100644 --- a/pkg/orkestra-registry/pods/types.go +++ b/pkg/orkestra-registry/pods/types.go @@ -35,15 +35,20 @@ type ResolvedPodSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional // +mapType=atomic - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + NodeSelector map[string]string // ServiceAccountName is the name of the ServiceAccount to use to run this pod. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` + ServiceAccountName string // ImagePullSecrets is an optional list of references to secrets in the same namespace to use // for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. ImagePullSecrets []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/pvcs/pvc.go b/pkg/orkestra-registry/pvcs/pvc.go index 875d4b95..110488b0 100644 --- a/pkg/orkestra-registry/pvcs/pvc.go +++ b/pkg/orkestra-registry/pvcs/pvc.go @@ -21,33 +21,39 @@ import ( // Create creates a PVC owned by the CR if it does not already exist. 
func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPVCSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - _, err := kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Get(ctx, spec.Name, metav1.GetOptions{}) + _, err := kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("pvc.Create: checking existence of %q: %w", spec.Name, err) } if err == nil { - logger.Debug().Str("pvc", spec.Name).Str("namespace", ns).Msg("pvc already exists — skipping create") + logger.Debug().Str("pvc", spec.Name).Str("namespace", namespace).Msg("pvc already exists — skipping create") return nil } - pvc := buildPVC(owner, spec, ns) - _, err = kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) + pvc := buildPVC(owner, spec, namespace) + _, err = kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("pvc.Create: creating %q in %q: %w", spec.Name, ns, err) + return fmt.Errorf("pvc.Create: creating %q in %q: %w", spec.Name, namespace, err) } - logger.Info().Str("pvc", spec.Name).Str("namespace", ns).Str("owner", owner.GetName()).Msg("pvc created") + logger.Info().Str("pvc", spec.Name).Str("namespace", namespace).Str("owner", owner.GetName()).Msg("pvc created") return nil } // Update reconciles a PVC. PVC spec is largely immutable after creation; // only labels are patched on drift. 
func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPVCSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - existing, err := kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Get(ctx, spec.Name, metav1.GetOptions{}) + existing, err := kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return Create(ctx, kube, owner, spec) @@ -70,20 +76,23 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec return nil } - _, err = kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Update(ctx, updated, metav1.UpdateOptions{}) + _, err = kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Update(ctx, updated, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("pvc.Update: updating %q: %w", spec.Name, err) } - logger.Info().Str("pvc", spec.Name).Str("namespace", ns).Msg("pvc updated") + logger.Info().Str("pvc", spec.Name).Str("namespace", namespace).Msg("pvc updated") return nil } // Delete deletes the PVC if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPVCSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - err := kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Delete(ctx, spec.Name, metav1.DeleteOptions{}) + err := kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -95,8 +104,8 @@ func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } // DeleteIfOwned deletes the PVC only if it is owned by the given CR. -func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, name, ns string) error { - existing, err := kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{}) +func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, name, namespace string) error { + existing, err := kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -106,7 +115,7 @@ func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domai if existing.Labels[labels.OrkestraOwner] != owner.GetName() { return nil } - return kube.Clientset().CoreV1().PersistentVolumeClaims(ns).Delete(ctx, name, metav1.DeleteOptions{}) + return kube.Clientset().CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, name, metav1.DeleteOptions{}) } // Resolve builds a ResolvedPVCSpec from a PVCTemplateSource. 
diff --git a/pkg/orkestra-registry/pvcs/types.go b/pkg/orkestra-registry/pvcs/types.go index 3fd545f0..3e056cd5 100644 --- a/pkg/orkestra-registry/pvcs/types.go +++ b/pkg/orkestra-registry/pvcs/types.go @@ -11,4 +11,9 @@ type ResolvedPVCSpec struct { VolumeMode string VolumeName string Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/pvs/pv.go b/pkg/orkestra-registry/pvs/pv.go index a0917ecc..b095f33e 100644 --- a/pkg/orkestra-registry/pvs/pv.go +++ b/pkg/orkestra-registry/pvs/pv.go @@ -9,6 +9,7 @@ import ( "github.com/orkspace/orkestra/pkg/kubeclient" "github.com/orkspace/orkestra/pkg/labels" "github.com/orkspace/orkestra/pkg/logger" + "github.com/orkspace/orkestra/pkg/orkestra-registry/common" orktypes "github.com/orkspace/orkestra/pkg/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -19,6 +20,9 @@ import ( // Create creates a PersistentVolume if it does not already exist. // PVs are cluster-scoped — owner references are set as labels only. func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPVSpec) error { + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().PersistentVolumes().Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("pv.Create: checking existence of %q: %w", spec.Name, err) @@ -40,6 +44,10 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Update reconciles an existing PV. Capacity and reclaim policy are patched on drift. 
func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedPVSpec) error { + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } + existing, err := kube.Clientset().CoreV1().PersistentVolumes().Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { diff --git a/pkg/orkestra-registry/pvs/types.go b/pkg/orkestra-registry/pvs/types.go index 1df1c1f4..d7fed28f 100644 --- a/pkg/orkestra-registry/pvs/types.go +++ b/pkg/orkestra-registry/pvs/types.go @@ -13,4 +13,9 @@ type ResolvedPVSpec struct { CSIDriver string CSIVolumeHandle string Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/replicasets/replicaset.go b/pkg/orkestra-registry/replicasets/replicaset.go index c9e537bf..404f46a8 100644 --- a/pkg/orkestra-registry/replicasets/replicaset.go +++ b/pkg/orkestra-registry/replicasets/replicaset.go @@ -27,6 +27,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().AppsV1().ReplicaSets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -64,6 +67,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().AppsV1().ReplicaSets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -125,6 +131,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the 
ReplicaSet if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedReplicaSetSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().AppsV1().ReplicaSets(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/replicasets/types.go b/pkg/orkestra-registry/replicasets/types.go index 3e502875..dd2e6c1e 100644 --- a/pkg/orkestra-registry/replicasets/types.go +++ b/pkg/orkestra-registry/replicasets/types.go @@ -41,15 +41,20 @@ type ResolvedReplicaSetSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional // +mapType=atomic - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + NodeSelector map[string]string // ServiceAccountName is the name of the ServiceAccount to use to run this pod. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` + ServiceAccountName string // ImagePullSecrets is an optional list of references to secrets in the same namespace to use // for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. ImagePullSecrets []string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string } diff --git a/pkg/orkestra-registry/rolebindings/rolebinding.go b/pkg/orkestra-registry/rolebindings/rolebinding.go index 13f4ff23..9c645e50 100644 --- a/pkg/orkestra-registry/rolebindings/rolebinding.go +++ b/pkg/orkestra-registry/rolebindings/rolebinding.go @@ -24,6 +24,11 @@ type ResolvedRoleBindingSpec struct { Labels map[string]string RoleRef rbacv1.RoleRef Subjects []rbacv1.Subject + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a RoleBinding if it does not already exist. @@ -35,6 +40,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().RbacV1().RoleBindings(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -68,6 +76,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // RoleRef is immutable in Kubernetes — if it changed the binding is deleted and recreated. func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedRoleBindingSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().RbacV1().RoleBindings(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -105,6 +116,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the RoleBinding if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedRoleBindingSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().RbacV1().RoleBindings(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/roles/role.go b/pkg/orkestra-registry/roles/role.go index b8b774df..26733572 100644 --- a/pkg/orkestra-registry/roles/role.go +++ b/pkg/orkestra-registry/roles/role.go @@ -23,6 +23,11 @@ type ResolvedRoleSpec struct { Namespace string Labels map[string]string Rules []rbacv1.PolicyRule + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a Role if it does not already exist. @@ -34,6 +39,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().RbacV1().Roles(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -66,6 +74,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Update applies the desired rules to an existing Role. 
func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedRoleSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().RbacV1().Roles(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -95,6 +106,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the Role if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedRoleSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().RbacV1().Roles(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/secrets/secret.go b/pkg/orkestra-registry/secrets/secret.go index ce496aaf..a406c5bf 100644 --- a/pkg/orkestra-registry/secrets/secret.go +++ b/pkg/orkestra-registry/secrets/secret.go @@ -31,11 +31,11 @@ type ResolvedSecretSpec struct { StringData map[string]string // Data - Data map[string][]byte `yaml:"data" validate:"omitempty"` + Data map[string][]byte // Type — Kubernetes Secret type. // Default: "Opaque" - Type string `yaml:"type" validate:"omitempty"` + Type string // FromSecret — name of a source Secret to copy from. // When set, copies all keys from the source into the target. @@ -51,6 +51,11 @@ type ResolvedSecretSpec struct { // Annotations — applied to Secret metadata. Annotations map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a Secret in the target namespace if it does not already exist. 
@@ -63,6 +68,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().Secrets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -107,6 +115,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().CoreV1().Secrets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -155,6 +166,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the Secret if it exists. func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedSecretSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().Secrets(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/secrets/types.go b/pkg/orkestra-registry/secrets/types.go index b5b8a3a6..a4f5324e 100644 --- a/pkg/orkestra-registry/secrets/types.go +++ b/pkg/orkestra-registry/secrets/types.go @@ -40,47 +40,52 @@ import orktypes "github.com/orkspace/orkestra/pkg/types" // All copies are owned by the CR — deleted automatically when the CR is deleted. type SecretTemplateSource struct { // Version — OrkestraRegistry implementation version. Omit for latest. - Version string `yaml:"version" validate:"omitempty"` + Version string // Name — Secret name in the target namespace. // Default: "{{ .metadata.name }}-secret" - Name string `yaml:"name" validate:"omitempty"` + Name string // Namespace — primary target namespace. 
// Default: "{{ .metadata.namespace }}" // When ToNamespaces is set, this field is ignored. - Namespace string `yaml:"namespace" validate:"omitempty"` + Namespace string // ToNamespaces — create one copy of this Secret in each listed namespace. // Each element supports template expressions. // e.g. ["{{ .metadata.namespace }}", "monitoring", "staging"] // When set, Namespace is ignored — ToNamespaces controls all target namespaces. - ToNamespaces []string `yaml:"toNamespaces" validate:"omitempty"` + ToNamespaces []string `yaml:"toNamespaces"` // FromSecret — name of an existing Secret to copy data from. // When set, Orkestra reads this Secret at reconcile time and copies its data. // This means the copy stays in sync — if the source changes, the copy updates. // Omit to use static Data entries instead. - FromSecret string `yaml:"fromSecret" validate:"omitempty"` + FromSecret string `yaml:"fromSecret"` // FromNamespace — namespace where FromSecret lives. // Default: same namespace as the CR. - FromNamespace string `yaml:"fromNamespace" validate:"omitempty"` + FromNamespace string `yaml:"fromNamespace"` // Data — static key-value Secret entries (string values). // Kubernetes encodes them to base64 automatically. // When FromSecret is also set, these entries override matching keys from the source. - Data map[string]string `yaml:"data" validate:"omitempty"` + Data map[string]string // Type — Kubernetes Secret type. // Default: Opaque. // e.g. "kubernetes.io/tls", "kubernetes.io/dockerconfigjson" - Type string `yaml:"type" validate:"omitempty"` + Type string // Labels — applied to all created Secret copies. - Labels []orktypes.ResourceLabel `yaml:"labels" validate:"omitempty"` + Labels []orktypes.ResourceLabel // Reconcile: true — also sync on every reconcile (drift correction). // When true, if the source Secret changes, all copies are updated automatically. - Reconcile bool `yaml:"reconcile" validate:"omitempty"` + Reconcile bool + + // Sleep injects an artificial delay into the reconcile of this resource.
+ // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/serviceaccounts/serviceaccount.go b/pkg/orkestra-registry/serviceaccounts/serviceaccount.go index 617f8ed4..3f4c2610 100644 --- a/pkg/orkestra-registry/serviceaccounts/serviceaccount.go +++ b/pkg/orkestra-registry/serviceaccounts/serviceaccount.go @@ -27,6 +27,11 @@ type ResolvedServiceAccountSpec struct { // Labels — applied to ServiceAccount metadata. Labels map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // Create creates a ServiceAccount if it does not already exist. @@ -42,6 +47,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().ServiceAccounts(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -76,6 +84,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // only use this when explicit cleanup control is needed. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedServiceAccountSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().ServiceAccounts(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/services/services.go b/pkg/orkestra-registry/services/services.go index 7a26b23d..b447ce47 100644 --- a/pkg/orkestra-registry/services/services.go +++ b/pkg/orkestra-registry/services/services.go @@ -30,6 +30,9 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } _, err := kube.Clientset().CoreV1().Services(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -68,6 +71,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } existing, err := kube.Clientset().CoreV1().Services(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { @@ -118,6 +124,9 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Delete deletes the Service if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedServiceSpec) error { namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } err := kube.Clientset().CoreV1().Services(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { diff --git a/pkg/orkestra-registry/services/types.go b/pkg/orkestra-registry/services/types.go index dd091f45..258ba208 100644 --- a/pkg/orkestra-registry/services/types.go +++ b/pkg/orkestra-registry/services/types.go @@ -38,4 +38,9 @@ type ResolvedServiceSpec struct { // Selector —> service selector to route traffic to pods. Selector map[string]string + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } diff --git a/pkg/orkestra-registry/statefulsets/statefulset.go b/pkg/orkestra-registry/statefulsets/statefulset.go index 822bd48d..f548c5cd 100644 --- a/pkg/orkestra-registry/statefulsets/statefulset.go +++ b/pkg/orkestra-registry/statefulsets/statefulset.go @@ -23,29 +23,32 @@ import ( // Create creates a StatefulSet owned by the CR if it does not already exist. 
func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedStatefulSetSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - _, err := kube.Clientset().AppsV1().StatefulSets(ns).Get(ctx, spec.Name, metav1.GetOptions{}) + _, err := kube.Clientset().AppsV1().StatefulSets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("statefulset.Create: checking existence of %q: %w", spec.Name, err) } if err == nil { logger.Debug(). Str("statefulset", spec.Name). - Str("namespace", ns). + Str("namespace", namespace). Msg("statefulset already exists — skipping create") return nil } - sts := buildStatefulSet(owner, spec, ns) - _, err = kube.Clientset().AppsV1().StatefulSets(ns).Create(ctx, sts, metav1.CreateOptions{}) + sts := buildStatefulSet(owner, spec, namespace) + _, err = kube.Clientset().AppsV1().StatefulSets(namespace).Create(ctx, sts, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("statefulset.Create: creating %q in %q: %w", spec.Name, ns, err) + return fmt.Errorf("statefulset.Create: creating %q in %q: %w", spec.Name, namespace, err) } logger.Info(). Str("statefulset", spec.Name). - Str("namespace", ns). + Str("namespace", namespace). Str("owner", owner.GetName()). Msg("statefulset created") return nil @@ -54,9 +57,12 @@ func Create(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec // Update reconciles an existing StatefulSet to match the resolved spec. // Patches replicas and image when drift is detected. 
func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedStatefulSetSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - existing, err := kube.Clientset().AppsV1().StatefulSets(ns).Get(ctx, spec.Name, metav1.GetOptions{}) + existing, err := kube.Clientset().AppsV1().StatefulSets(namespace).Get(ctx, spec.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return Create(ctx, kube, owner, spec) @@ -64,7 +70,7 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec return fmt.Errorf("statefulset.Update: getting %q: %w", spec.Name, err) } - desired := buildStatefulSet(owner, spec, ns) + desired := buildStatefulSet(owner, spec, namespace) drifted := false updated := existing.DeepCopy() @@ -83,20 +89,23 @@ func Update(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec return nil } - _, err = kube.Clientset().AppsV1().StatefulSets(ns).Update(ctx, updated, metav1.UpdateOptions{}) + _, err = kube.Clientset().AppsV1().StatefulSets(namespace).Update(ctx, updated, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("statefulset.Update: updating %q: %w", spec.Name, err) } - logger.Info().Str("statefulset", spec.Name).Str("namespace", ns).Msg("statefulset updated") + logger.Info().Str("statefulset", spec.Name).Str("namespace", namespace).Msg("statefulset updated") return nil } // Delete deletes the StatefulSet if it exists. 
func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, spec ResolvedStatefulSetSpec) error { - ns := common.ResolveNamespace(owner, spec.Namespace) + namespace := common.ResolveNamespace(owner, spec.Namespace) + if err := common.SleepIfNeeded(spec.Sleep); err != nil { + return err + } - err := kube.Clientset().AppsV1().StatefulSets(ns).Delete(ctx, spec.Name, metav1.DeleteOptions{}) + err := kube.Clientset().AppsV1().StatefulSets(namespace).Delete(ctx, spec.Name, metav1.DeleteOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -108,8 +117,8 @@ func Delete(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Objec } // DeleteIfOwned deletes the StatefulSet only if it is owned by the given CR. -func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, name, ns string) error { - existing, err := kube.Clientset().AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{}) +func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domain.Object, name, namespace string) error { + existing, err := kube.Clientset().AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -119,7 +128,7 @@ func DeleteIfOwned(ctx context.Context, kube *kubeclient.Kubeclient, owner domai if existing.Labels[labels.OrkestraOwner] != owner.GetName() { return nil } - return kube.Clientset().AppsV1().StatefulSets(ns).Delete(ctx, name, metav1.DeleteOptions{}) + return kube.Clientset().AppsV1().StatefulSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) } // Resolve builds a ResolvedStatefulSetSpec from a StatefulSetTemplateSource. 
diff --git a/pkg/orkestra-registry/statefulsets/types.go b/pkg/orkestra-registry/statefulsets/types.go index a18da805..10a6cf5a 100644 --- a/pkg/orkestra-registry/statefulsets/types.go +++ b/pkg/orkestra-registry/statefulsets/types.go @@ -42,6 +42,11 @@ type ResolvedStatefulSetSpec struct { ImagePullSecrets []string VolumeClaimRetentionPolicy VolumeClaimRetentionPolicy + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string } // VolumeClaimRetentionPolicy describes the policy used for PVCs diff --git a/pkg/orkestra-registry/template/resolver.go b/pkg/orkestra-registry/template/resolver.go index 65f64955..1e3e8639 100644 --- a/pkg/orkestra-registry/template/resolver.go +++ b/pkg/orkestra-registry/template/resolver.go @@ -173,6 +173,9 @@ func (r *Resolver) ResolvePodTemplate(src orktypes.PodTemplateSource) (orktypes. 
if resolved.Annotations, err = r.ResolveLabels(src.Annotations); err != nil { return resolved, fmt.Errorf("pod.annotations: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("pod.sleep: %w", err) + } return resolved, nil } @@ -222,6 +225,9 @@ func (r *Resolver) ResolveDeploymentTemplate(src orktypes.DeploymentTemplateSour if resolved.ServiceAccountName, err = r.Resolve(src.ServiceAccountName); err != nil { return resolved, fmt.Errorf("deployment.serviceAccountName: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("deployment.sleep: %w", err) + } // Env resolution if len(src.Env) > 0 { @@ -329,6 +335,10 @@ func (r *Resolver) ResolveReplicaSetTemplate(src orktypes.ReplicaSetTemplateSour return resolved, fmt.Errorf("replicaet.serviceAccountName: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("replicaset.sleep: %w", err) + } + // Labels if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("replicaset.labels: %w", err) @@ -425,6 +435,9 @@ func (r *Resolver) ResolveServiceTemplate(src orktypes.ServiceTemplateSource) (o if resolved.TargetPort, err = r.Resolve(src.TargetPort); err != nil { return resolved, fmt.Errorf("service.targetPort: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("service.sleep: %w", err) + } ns := src.Namespace if ns == "" { @@ -461,6 +474,10 @@ func (r *Resolver) ResolveNamespaceTemplate(src orktypes.NamespaceTemplateSource return resolved, fmt.Errorf("namespace.labels: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("namespace.sleep: %w", err) + } + for i, a := range src.Finalizers { rv, e := r.Resolve(a) if e != nil { @@ -487,6 +504,9 @@ func (r *Resolver) ResolveJobTemplate(src orktypes.JobTemplateSource) (orktypes. 
if resolved.Image, err = r.Resolve(src.Image); err != nil { return resolved, fmt.Errorf("job.image: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("job.sleep: %w", err) + } if resolved.ImagePullSecrets, err = r.ResolveStringSlice(src.ImagePullSecrets); err != nil { return resolved, fmt.Errorf("job.imagePullSecrets: %w", err) } @@ -552,6 +572,9 @@ func (r *Resolver) ResolveSecretTemplate(src orktypes.SecretTemplateSource) (ork if resolved.Namespace, err = r.Resolve(src.Namespace); err != nil { return resolved, fmt.Errorf("secret.namespace: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("secret.sleep: %w", err) + } if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("secret.data: %w", err) } @@ -633,6 +656,9 @@ func (r *Resolver) ResolveConfigMapTemplate(src orktypes.ConfigMapTemplateSource if resolved.FromNamespace, err = r.Resolve(src.FromNamespace); err != nil { return resolved, fmt.Errorf("configmap.fromNamespace: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("configmap.sleep: %w", err) + } if resolved.FromConfigMap, err = r.Resolve(src.FromConfigMap); err != nil { return resolved, fmt.Errorf("configmap.fromConfigMap: %w", err) } @@ -713,6 +739,9 @@ func (r *Resolver) ResolveCronJobTemplate(src orktypes.CronJobTemplateSource) (o if resolved.Image, err = r.Resolve(src.Image); err != nil { return resolved, fmt.Errorf("cronjob.image: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("cronjob.sleep: %w", err) + } if resolved.ImagePullSecrets, err = r.ResolveStringSlice(src.ImagePullSecrets); err != nil { return resolved, fmt.Errorf("cronjob.imagePullSecrets: %w", err) } @@ -773,6 +802,9 @@ func (r *Resolver) ResolveServiceAccountTemplate(src orktypes.ServiceAccountTemp if resolved.Namespace, err = 
r.Resolve(ns); err != nil { return resolved, fmt.Errorf("serviceaccount.namespace: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("serviceaccount.sleep: %w", err) + } if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("serviceaccount.labels: %w", err) @@ -801,6 +833,9 @@ func (r *Resolver) ResolveRoleTemplate(src orktypes.RoleTemplateSource) (orktype if resolved.Namespace, err = r.Resolve(ns); err != nil { return resolved, fmt.Errorf("role.namespace: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("role.sleep: %w", err) + } if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("role.labels: %w", err) @@ -846,6 +881,9 @@ func (r *Resolver) ResolveRoleBindingTemplate(src orktypes.RoleBindingTemplateSo if resolved.Namespace, err = r.Resolve(ns); err != nil { return resolved, fmt.Errorf("rolebinding.namespace: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("rolebinding.sleep: %w", err) + } if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("rolebinding.labels: %w", err) @@ -899,6 +937,9 @@ func (r *Resolver) ResolveIngressTemplate(src orktypes.IngressTemplateSource) (o if resolved.IngressClass, err = r.Resolve(src.IngressClass); err != nil { return resolved, fmt.Errorf("ingress.ingressClass: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("ingress.sleep: %w", err) + } ns := src.Namespace if ns == "" { @@ -965,6 +1006,9 @@ func (r *Resolver) ResolveHPATemplate(src orktypes.HPATemplateSource) (orktypes.
if resolved.TargetCPUUtilizationPercentage, err = r.Resolve(src.TargetCPUUtilizationPercentage); err != nil { return resolved, fmt.Errorf("hpa.targetCPUUtilizationPercentage: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("hpa.sleep: %w", err) + } ns := src.Namespace if ns == "" { @@ -1015,6 +1059,10 @@ func (r *Resolver) ResolvePDBTemplate(src orktypes.PDBTemplateSource) (orktypes. return resolved, fmt.Errorf("pdb.selector: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("pdb.sleep: %w", err) + } + return resolved, nil } @@ -1057,6 +1105,9 @@ func (r *Resolver) ResolveStatefulSetTemplate(src orktypes.StatefulSetTemplateSo if resolved.MountPath, err = r.Resolve(src.MountPath); err != nil { return resolved, fmt.Errorf("statefulset.mountPath: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("statefulset.sleep: %w", err) + } ns := src.Namespace if ns == "" { @@ -1155,6 +1206,9 @@ func (r *Resolver) ResolvePVCTemplate(src orktypes.PVCTemplateSource) (orktypes. 
if resolved.VolumeName, err = r.Resolve(src.VolumeName); err != nil { return resolved, fmt.Errorf("pvc.volumeName: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("pvc.sleep: %w", err) + } ns := src.Namespace if ns == "" { @@ -1201,6 +1255,9 @@ func (r *Resolver) ResolvePVTemplate(src orktypes.PVTemplateSource) (orktypes.PV if resolved.CSIVolumeHandle, err = r.Resolve(src.CSIVolumeHandle); err != nil { return resolved, fmt.Errorf("pv.csiVolumeHandle: %w", err) } + if resolved.Sleep, err = r.Resolve(src.Sleep); err != nil { + return resolved, fmt.Errorf("pv.sleep: %w", err) + } if resolved.Labels, err = r.ResolveLabels(src.Labels); err != nil { return resolved, fmt.Errorf("pv.labels: %w", err) diff --git a/pkg/types/docker.go b/pkg/types/docker.go index 98d36d54..0fb01fb6 100644 --- a/pkg/types/docker.go +++ b/pkg/types/docker.go @@ -117,4 +117,9 @@ type DockerHookSpec struct { When []Condition `yaml:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } diff --git a/pkg/types/external.go b/pkg/types/external.go index 752c0af3..c409a6c6 100644 --- a/pkg/types/external.go +++ b/pkg/types/external.go @@ -88,6 +88,11 @@ type ExternalCallSpec struct { // The result is not injected when skipped. Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). 
+ Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ExternalCallResult is the result of one HTTP call, injected into the resolver diff --git a/pkg/types/git.go b/pkg/types/git.go index 359e1fc0..b833f9b8 100644 --- a/pkg/types/git.go +++ b/pkg/types/git.go @@ -63,4 +63,9 @@ type GitHookSpec struct { When []Condition `yaml:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } diff --git a/pkg/types/hook_methods.go b/pkg/types/hook_methods.go new file mode 100644 index 00000000..aca1cfba --- /dev/null +++ b/pkg/types/hook_methods.go @@ -0,0 +1,532 @@ +package types + +// HasAnyHooks reports whether this CRD declares any onCreate, onReconcile, or onDelete hooks. +func (c *CRDEntry) HasAnyHooks() bool { + return c.HasOnCreate() || c.HasOnReconcile() || c.HasOnDelete() +} + +// HasAnyDeployments reports whether this CRD defines any Deployments +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyDeployments() bool { + if c.HasOnCreate() { + return c.OperatorBox.OnCreate.Deployments != nil + } + if c.HasOnReconcile() { + return c.OperatorBox.OnReconcile.Deployments != nil + } + + return false +} + +// HasAnyStatefulSets reports whether this CRD defines any StatefulSets +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStatefulSets() bool { + if c.HasOnCreate() { + return c.OperatorBox.OnCreate.StatefulSets != nil + } + if c.HasOnReconcile() { + return c.OperatorBox.OnReconcile.StatefulSets != nil + } + + return false +} + +// HasAnyReplicaSets reports whether this CRD defines any ReplicaSets +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyReplicaSets() bool { + if c.HasOnCreate() { + return c.OperatorBox.OnCreate.ReplicaSets != nil + } + if c.HasOnReconcile() { + return c.OperatorBox.OnReconcile.ReplicaSets != nil + } + + return false +} + +// HasAnySecrets reports whether this CRD defines any secrets +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnySecrets() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Secrets) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Secrets) > 0 + } + + return false +} + +// HasAnyTLSSecrets reports whether any secret in either phase +// defines a TLS configuration. +func (c *CRDEntry) HasAnyTLSSecrets() bool { + if c.HasOnCreate() { + for _, s := range c.OperatorBox.OnCreate.Secrets { + if s.TLS != nil { + return true + } + } + } + + if c.HasOnReconcile() { + for _, s := range c.OperatorBox.OnReconcile.Secrets { + if s.TLS != nil { + return true + } + } + } + + return false +} + +// HasAnyHPA reports whether this CRD defines any HPA defined +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyHPA() bool { + if c.HasOnCreate() { + return c.OperatorBox.OnCreate.HorizontalPodAutoscalers != nil + } + if c.HasOnReconcile() { + return c.OperatorBox.OnReconcile.HorizontalPodAutoscalers != nil + } + + return false +} + +// HasAnyServices reports whether this CRD defines any Services +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyServices() bool { + if c.HasOnCreate() { + return c.OperatorBox.OnCreate.Services != nil + } + if c.HasOnReconcile() { + return c.OperatorBox.OnReconcile.Services != nil + } + + return false +} + +// HasAnyPods reports whether this CRD defines any Pods +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyPods() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Pods) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Pods) > 0 + } + return false +} + +// HasAnyConfigMaps reports whether this CRD defines any ConfigMaps +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyConfigMaps() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ConfigMaps) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ConfigMaps) > 0 + } + return false +} + +// HasAnyServiceAccounts reports whether this CRD defines any ServiceAccounts +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyServiceAccounts() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ServiceAccounts) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ServiceAccounts) > 0 + } + return false +} + +// HasAnyIngresses reports whether this CRD defines any Ingresses +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyIngresses() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Ingresses) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Ingresses) > 0 + } + return false +} + +// HasAnyPersistentVolumes reports whether this CRD defines any PersistentVolumes +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyPersistentVolumes() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PersistentVolumes) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PersistentVolumes) > 0 + } + return false +} + +// HasAnyPersistentVolumeClaims reports whether this CRD defines any PVCs +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyPersistentVolumeClaims() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PersistentVolumeClaims) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PersistentVolumeClaims) > 0 + } + return false +} + +// HasAnyPodDisruptionBudgets reports whether this CRD defines any PDBs +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyPodDisruptionBudgets() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PodDisruptionBudgets) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PodDisruptionBudgets) > 0 + } + return false +} + +// HasAnyNamespaces reports whether this CRD defines any Namespaces +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyNamespaces() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Namespaces) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Namespaces) > 0 + } + return false +} + +// HasAnyRoles reports whether this CRD defines any Roles +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyRoles() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Roles) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Roles) > 0 + } + return false +} + +// HasAnyRoleBindings reports whether this CRD defines any RoleBindings +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyRoleBindings() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.RoleBindings) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.RoleBindings) > 0 + } + return false +} + +// HasAnyVolumes reports whether this CRD defines any Volumes (placeholder) +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyVolumes() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.Volumes) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.Volumes) > 0 + } + return false +} + +// HasAnyVolumeMounts reports whether this CRD defines any VolumeMounts (placeholder) +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyVolumeMounts() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.VolumeMounts) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.VolumeMounts) > 0 + } + return false +} + +// HasAnyClusterRoles reports whether this CRD defines any ClusterRoles +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyClusterRoles() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ClusterRoles) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ClusterRoles) > 0 + } + return false +} + +// HasAnyClusterRoleBindings reports whether this CRD defines any ClusterRoleBindings +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyClusterRoleBindings() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ClusterRoleBindings) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ClusterRoleBindings) > 0 + } + return false +} + +// HasAnyServiceMonitors reports whether this CRD defines any ServiceMonitors +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyServiceMonitors() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ServiceMonitors) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ServiceMonitors) > 0 + } + return false +} + +// HasAnyPodSecurityPolicies reports whether this CRD defines any PSPs +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyPodSecurityPolicies() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PodSecurityPolicies) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PodSecurityPolicies) > 0 + } + return false +} + +// HasAnyPriorityClasses reports whether this CRD defines any PriorityClasses +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyPriorityClasses() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PriorityClasses) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PriorityClasses) > 0 + } + return false +} + +// HasAnyLimitRanges reports whether this CRD defines any LimitRanges +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyLimitRanges() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.LimitRanges) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.LimitRanges) > 0 + } + return false +} + +// HasAnyResourceQuotas reports whether this CRD defines any ResourceQuotas +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyResourceQuotas() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.ResourceQuotas) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.ResourceQuotas) > 0 + } + return false +} + +// HasAnyRuntimeClasses reports whether this CRD defines any RuntimeClasses +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyRuntimeClasses() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.RuntimeClasses) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.RuntimeClasses) > 0 + } + return false +} + +// HasAnyPriorityLevelConfigurations reports whether this CRD defines any PL configs +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyPriorityLevelConfigurations() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PriorityLevelConfigurations) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PriorityLevelConfigurations) > 0 + } + return false +} + +// HasAnyPodTemplates reports whether this CRD defines any PodTemplates +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyPodTemplates() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.PodTemplates) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.PodTemplates) > 0 + } + return false +} + +// HasAnyDaemonSets reports whether this CRD defines any DaemonSets +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyDaemonSets() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.DaemonSets) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.DaemonSets) > 0 + } + return false +} + +// HasAnyNetworkPolicies reports whether this CRD defines any NetworkPolicies +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyNetworkPolicies() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.NetworkPolicies) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.NetworkPolicies) > 0 + } + return false +} + +// HasAnyStorageClasses reports whether this CRD defines any StorageClasses +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStorageClasses() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StorageClasses) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StorageClasses) > 0 + } + return false +} + +// HasAnyStorageLocations reports whether this CRD defines any StorageLocations +// in either OnCreate or OnReconcile phases. 
+func (c *CRDEntry) HasAnyStorageLocations() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StorageLocations) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StorageLocations) > 0 + } + return false +} + +// HasAnyStoragePools reports whether this CRD defines any StoragePools +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStoragePools() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StoragePools) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StoragePools) > 0 + } + return false +} + +// HasAnyStorageBackups reports whether this CRD defines any StorageBackups +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStorageBackups() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StorageBackups) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StorageBackups) > 0 + } + return false +} + +// HasAnyStorageSnapshots reports whether this CRD defines any StorageSnapshots +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStorageSnapshots() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StorageSnapshots) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StorageSnapshots) > 0 + } + return false +} + +// HasAnyStorageVolumes reports whether this CRD defines any StorageVolumes +// in either OnCreate or OnReconcile phases. +func (c *CRDEntry) HasAnyStorageVolumes() bool { + if c.HasOnCreate() { + return len(c.OperatorBox.OnCreate.StorageVolumes) > 0 + } + if c.HasOnReconcile() { + return len(c.OperatorBox.OnReconcile.StorageVolumes) > 0 + } + return false +} + +// NeedsResourceDecl reports whether this CRD defines any workload resources +// (Deployments, StatefulSets, or ReplicaSets) in either OnCreate or OnReconcile. 
+func (c *CRDEntry) NeedsResourceDecl() bool { + return c.HasAnyDeployments() || + c.HasAnyReplicaSets() || + c.HasAnyStatefulSets() +} + +// ResourceDecl returns the first ResourceRequirements defined for this CRD. +// It checks OnCreate first, then OnReconcile, and searches Deployments, +// StatefulSets, and ReplicaSets in that order. Returns nil if none exist. +func (c *CRDEntry) ResourceDecl() *ResourceRequirements { + // OnCreate phase takes precedence + if c.HasOnCreate() { + if req := findResourceDeclInPhase(c.OperatorBox.OnCreate); req != nil { + return req + } + } + + // OnReconcile fallback + if c.HasOnReconcile() { + if req := findResourceDeclInPhase(c.OperatorBox.OnReconcile); req != nil { + return req + } + } + + return nil +} + +// findResourceDeclInPhase searches Deployments, StatefulSets, and ReplicaSets +// inside a single OperatorPhase and returns the first non-nil ResourceRequirements. +func findResourceDeclInPhase(tmpl *HookTemplates) *ResourceRequirements { + if tmpl == nil { + return nil + } + + // Deployments + if tmpl.Deployments != nil { + for _, d := range tmpl.Deployments { + if d.Resources != nil { + return d.Resources + } + } + } + + // StatefulSets + if tmpl.StatefulSets != nil { + for _, s := range tmpl.StatefulSets { + if s.Resources != nil { + return s.Resources + } + } + } + + // ReplicaSets + if tmpl.ReplicaSets != nil { + for _, r := range tmpl.ReplicaSets { + if r.Resources != nil { + return r.Resources + } + } + } + + return nil +} diff --git a/pkg/types/hook_temp.go b/pkg/types/hook_temp.go new file mode 100644 index 00000000..e614f60c --- /dev/null +++ b/pkg/types/hook_temp.go @@ -0,0 +1,139 @@ +package types + +// VisitResources calls fn for every resource template in this HookTemplates. +// It abstracts over all resource slices (Deployments, Services, Jobs, etc.) +// so callers can perform generic operations like detecting Sleep, validating +// fields, or scanning for managed-resource contracts. 
+func (h *HookTemplates) VisitResources(fn func(res interface{})) { + // Core workload resources + for _, x := range h.Deployments { + fn(x) + } + for _, x := range h.ReplicaSets { + fn(x) + } + for _, x := range h.StatefulSets { + fn(x) + } + for _, x := range h.DaemonSets { + fn(x) + } + for _, x := range h.Pods { + fn(x) + } + + // Services & networking + for _, x := range h.Services { + fn(x) + } + for _, x := range h.Ingresses { + fn(x) + } + for _, x := range h.NetworkPolicies { + fn(x) + } + + // Batch + for _, x := range h.Jobs { + fn(x) + } + for _, x := range h.CronJobs { + fn(x) + } + + // Config & identity + for _, x := range h.Secrets { + fn(x) + } + for _, x := range h.ConfigMaps { + fn(x) + } + for _, x := range h.ServiceAccounts { + fn(x) + } + for _, x := range h.Roles { + fn(x) + } + for _, x := range h.RoleBindings { + fn(x) + } + for _, x := range h.ClusterRoles { + fn(x) + } + for _, x := range h.ClusterRoleBindings { + fn(x) + } + + // Storage + for _, x := range h.PersistentVolumes { + fn(x) + } + for _, x := range h.PersistentVolumeClaims { + fn(x) + } + for _, x := range h.StorageClasses { + fn(x) + } + for _, x := range h.StorageLocations { + fn(x) + } + for _, x := range h.StoragePools { + fn(x) + } + for _, x := range h.StorageBackups { + fn(x) + } + for _, x := range h.StorageSnapshots { + fn(x) + } + for _, x := range h.StorageVolumes { + fn(x) + } + + // Autoscaling, disruption, scheduling + for _, x := range h.HorizontalPodAutoscalers { + fn(x) + } + for _, x := range h.PodDisruptionBudgets { + fn(x) + } + for _, x := range h.PriorityClasses { + fn(x) + } + for _, x := range h.RuntimeClasses { + fn(x) + } + for _, x := range h.LimitRanges { + fn(x) + } + for _, x := range h.ResourceQuotas { + fn(x) + } + + // Namespaces + for _, x := range h.Namespaces { + fn(x) + } + + // Pod templates + for _, x := range h.PodTemplates { + fn(x) + } + + // Placeholders (future extensibility) + for _, x := range h.Volumes { + fn(x) + } + for _, x := 
range h.VolumeMounts { + fn(x) + } + for _, x := range h.ServiceMonitors { + fn(x) + } + for _, x := range h.PodSecurityPolicies { + fn(x) + } + for _, x := range h.PriorityLevelConfigurations { + fn(x) + } +} diff --git a/pkg/types/hooks_sleep.go b/pkg/types/hooks_sleep.go new file mode 100644 index 00000000..ebed8d7c --- /dev/null +++ b/pkg/types/hooks_sleep.go @@ -0,0 +1,211 @@ +package types + +import ( + "fmt" + "strings" +) + +// SleepEntry describes a single sleep declaration found in a HookTemplates +// resource. It includes the phase (onCreate/onReconcile/onDelete), the +// resource type (for human diagnostics), an optional resource name (if present +// in the template), and the raw duration string. +type SleepEntry struct { + Phase string // "onCreate", "onReconcile", "onDelete" + Resource string // e.g., "Deployment", "Service", "Job" + ResourceName string // template name field if available (may be empty) + Duration string // raw duration string as written in the katalog +} + +// tiny interfaces used by CollectSleepEntries to avoid a large type switch. +type sleeper interface { + GetSleep() string +} + +type namer interface { + GetName() string +} + +// CollectSleepEntries returns all SleepEntry items declared for this CRD across +// OnCreate, OnReconcile and OnDelete. This is the canonical discovery method +// callers should use for validation, diagnostics, or runtime wiring. 
+func (c *CRDEntry) CollectSleepEntries() []SleepEntry { + if !c.HasAnyHooks() { + return nil + } + + var out []SleepEntry + + collect := func(phase string, ht *HookTemplates) { + if ht == nil { + return + } + ht.VisitResources(func(res interface{}) { + // Prefer the sleeper interface + si, ok := res.(sleeper) + if !ok { + // nothing to do for resources that don't expose GetSleep + return + } + s := si.GetSleep() + if s == "" { + return + } + + // Try to get a friendly resource name via namer + var rname string + if n, ok := res.(namer); ok { + rname = n.GetName() + } + + // Derive a short resource type from the concrete type + rtype := fmt.Sprintf("%T", res) + if i := strings.LastIndex(rtype, "."); i >= 0 { + rtype = rtype[i+1:] + } + + out = append(out, SleepEntry{ + Phase: phase, + Resource: rtype, + ResourceName: rname, + Duration: s, + }) + }) + } + + if c.HasOnCreate() { + collect("onCreate", c.OperatorBox.OnCreate) + } + if c.HasOnReconcile() { + collect("onReconcile", c.OperatorBox.OnReconcile) + } + if c.HasOnDelete() { + collect("onDelete", c.OperatorBox.OnDelete) + } + + return out +} + +// HasSleep reports whether any sleep declarations exist for this CRD. +// It is a thin convenience wrapper around CollectSleepEntries. +func (c *CRDEntry) HasSleep() bool { + return len(c.CollectSleepEntries()) > 0 +} + +// extractSleep attempts to read a Sleep value from any resource template. +// A resource participates if it implements GetSleep(). Returns the raw +// duration string and true when present, otherwise an empty string and false. +func extractSleep(res interface{}) (string, bool) { + if s, ok := res.(sleeper); ok { + return s.GetSleep(), true + } + return "", false +} + +// GetSleep returns the optional artificial delay configured for this resource. +// When non-empty, Orkestra injects the delay at the start of each reconcile +// for latency simulation, autoscale testing, or chaos engineering scenarios. 
+// +// GetName returns the template's name field when available. These small +// accessors keep CollectSleepEntries concise and avoid a large type switch. +// +// NOTE: value receivers are used so methods are available on both values and pointers. + +// Core workload resources +func (t DeploymentTemplateSource) GetSleep() string { return t.Sleep } +func (t ReplicaSetTemplateSource) GetSleep() string { return t.Sleep } +func (t StatefulSetTemplateSource) GetSleep() string { return t.Sleep } + +// func (t DaemonSetTemplateSource) GetSleep() string { return t.Sleep } +func (t PodTemplateSource) GetSleep() string { return t.Sleep } + +// Services & networking +func (t ServiceTemplateSource) GetSleep() string { return t.Sleep } +func (t IngressTemplateSource) GetSleep() string { return t.Sleep } + +// func (t NetworkPolicyTemplateSource) GetSleep() string { return t.Sleep } + +// Batch +func (t JobTemplateSource) GetSleep() string { return t.Sleep } +func (t CronJobTemplateSource) GetSleep() string { return t.Sleep } + +// Config & identity +func (t SecretTemplateSource) GetSleep() string { return t.Sleep } +func (t ConfigMapTemplateSource) GetSleep() string { return t.Sleep } +func (t ServiceAccountTemplateSource) GetSleep() string { return t.Sleep } +func (t RoleTemplateSource) GetSleep() string { return t.Sleep } +func (t RoleBindingTemplateSource) GetSleep() string { return t.Sleep } + +// Storage +func (t PVTemplateSource) GetSleep() string { return t.Sleep } +func (t PVCTemplateSource) GetSleep() string { return t.Sleep } + +// Autoscaling, disruption, scheduling +func (t HPATemplateSource) GetSleep() string { return t.Sleep } +func (t PDBTemplateSource) GetSleep() string { return t.Sleep } +func (t NamespaceTemplateSource) GetSleep() string { return t.Sleep } + +// P L A C E H O L D E R S +// func (t PlaceholderSource) GetSleep() string { return t.Sleep } + +// // Storage +// func (t StorageClassTemplateSource) GetSleep() string { return t.Sleep } +// func (t 
StorageLocationTemplateSource) GetSleep() string { return t.Sleep } +// func (t StoragePoolTemplateSource) GetSleep() string { return t.Sleep } +// func (t StorageBackupTemplateSource) GetSleep() string { return t.Sleep } +// func (t StorageSnapshotTemplateSource) GetSleep() string { return t.Sleep } +// func (t StorageVolumeTemplateSource) GetSleep() string { return t.Sleep } + +// // Scheduling / QoS +// func (t PriorityClassTemplateSource) GetSleep() string { return t.Sleep } +// func (t RuntimeClassTemplateSource) GetSleep() string { return t.Sleep } +// func (t LimitRangeTemplateSource) GetSleep() string { return t.Sleep } +// func (t ResourceQuotaTemplateSource) GetSleep() string { return t.Sleep } +// func (t PriorityLevelConfigurationTemplateSource) GetSleep() string { +// return t.Sleep +// } + +// // Pod templates and monitors +// func (t PodTemplatePlaceholderSource) GetSleep() string { return t.Sleep } +// func (t ServiceMonitorTemplateSource) GetSleep() string { return t.Sleep } +// func (t PodSecurityPolicyTemplateSource) GetSleep() string { return t.Sleep } + +// Additional common types +func (t PodTemplateSource) GetName() string { return t.Name } +func (t DeploymentTemplateSource) GetName() string { return t.Name } +func (t ReplicaSetTemplateSource) GetName() string { return t.Name } +func (t StatefulSetTemplateSource) GetName() string { return t.Name } + +// func (t DaemonSetTemplateSource) GetName() string { return t.Name } +func (t ServiceTemplateSource) GetName() string { return t.Name } +func (t IngressTemplateSource) GetName() string { return t.Name } + +// func (t NetworkPolicyTemplateSource) GetName() string { return t.Name } +func (t JobTemplateSource) GetName() string { return t.Name } +func (t CronJobTemplateSource) GetName() string { return t.Name } +func (t SecretTemplateSource) GetName() string { return t.Name } +func (t ConfigMapTemplateSource) GetName() string { return t.Name } +func (t ServiceAccountTemplateSource) GetName() string 
{ return t.Name } +func (t RoleTemplateSource) GetName() string { return t.Name } +func (t RoleBindingTemplateSource) GetName() string { return t.Name } +func (t PVTemplateSource) GetName() string { return t.Name } +func (t PVCTemplateSource) GetName() string { return t.Name } +func (t HPATemplateSource) GetName() string { return t.Name } +func (t PDBTemplateSource) GetName() string { return t.Name } +func (t NamespaceTemplateSource) GetName() string { return t.Name } + +// P L A C E H O L D E R S +// func (t PlaceholderSource) GetName() string { return t.Name } +// func (t StorageClassTemplateSource) GetName() string { return t.Name } +// func (t StorageLocationTemplateSource) GetName() string { return t.Name } +// func (t StoragePoolTemplateSource) GetName() string { return t.Name } +// func (t StorageBackupTemplateSource) GetName() string { return t.Name } +// func (t StorageSnapshotTemplateSource) GetName() string { return t.Name } +// func (t StorageVolumeTemplateSource) GetName() string { return t.Name } +// func (t PriorityClassTemplateSource) GetName() string { return t.Name } +// func (t RuntimeClassTemplateSource) GetName() string { return t.Name } +// func (t LimitRangeTemplateSource) GetName() string { return t.Name } +// func (t ResourceQuotaTemplateSource) GetName() string { return t.Name } +// func (t PriorityLevelConfigurationTemplateSource) GetName() string { return t.Name } +// func (t PodTemplatePlaceholderSource) GetName() string { return t.Name } +// func (t ServiceMonitorTemplateSource) GetName() string { return t.Name } +// func (t PodSecurityPolicyTemplateSource) GetName() string {return t.Name} diff --git a/pkg/types/methods.go b/pkg/types/methods.go index 943a2682..c6010184 100644 --- a/pkg/types/methods.go +++ b/pkg/types/methods.go @@ -385,11 +385,6 @@ func (c *CRDEntry) HasOnDelete() bool { return c.OperatorBox.OnDelete != nil } -// HasAnyHooks reports whether this CRD declares any onCreate, onReconcile, or onDelete hooks. 
-func (c *CRDEntry) HasAnyHooks() bool { - return c.HasOnCreate() || c.HasOnReconcile() || c.HasOnDelete() -} - // HasStatusFields reports whether this CRD declares any status fields. func (c *CRDEntry) HasStatusFields() bool { return c.OperatorBox.Status != nil && c.OperatorBox.Status.HasFields() @@ -425,67 +420,6 @@ func (c *CRDEntry) HasRestrictedNamespaces() bool { return len(c.RestrictedNamespaces) > 0 } -// HasAnySecrets reports whether this CRD defines any secrets -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnySecrets() bool { - if c.HasOnCreate() { - return len(c.OperatorBox.OnCreate.Secrets) > 0 - } - if c.HasOnReconcile() { - return len(c.OperatorBox.OnReconcile.Secrets) > 0 - } - - return false -} - -// HasAnyTLSSecrets reports whether any secret in either phase -// defines a TLS configuration. -func (c *CRDEntry) HasAnyTLSSecrets() bool { - if c.HasOnCreate() { - for _, s := range c.OperatorBox.OnCreate.Secrets { - if s.TLS != nil { - return true - } - } - } - - if c.HasOnReconcile() { - for _, s := range c.OperatorBox.OnReconcile.Secrets { - if s.TLS != nil { - return true - } - } - } - - return false -} - -// HasAnyHPA reports whether this CRD defines any HPA defined -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnyHPA() bool { - if c.HasOnCreate() { - return c.OperatorBox.OnCreate.HorizontalPodAutoscalers != nil - } - if c.HasOnReconcile() { - return c.OperatorBox.OnReconcile.HorizontalPodAutoscalers != nil - } - - return false -} - -// HasAnyServices reports whether this CRD defines any Services -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnyServices() bool { - if c.HasOnCreate() { - return c.OperatorBox.OnCreate.Services != nil - } - if c.HasOnReconcile() { - return c.OperatorBox.OnReconcile.Services != nil - } - - return false -} - // IsValidServiceType reports whether the provided service type is valid. 
// Accepted values (case‑insensitive): // - ClusterIP @@ -513,108 +447,3 @@ func IsValidProtocol(p string) bool { return false } } - -// HasAnyDeployments reports whether this CRD defines any Deployments -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnyDeployments() bool { - if c.HasOnCreate() { - return c.OperatorBox.OnCreate.Deployments != nil - } - if c.HasOnReconcile() { - return c.OperatorBox.OnReconcile.Deployments != nil - } - - return false -} - -// HasAnyStatefulSets reports whether this CRD defines any StatefulSets -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnyStatefulSets() bool { - if c.HasOnCreate() { - return c.OperatorBox.OnCreate.StatefulSets != nil - } - if c.HasOnReconcile() { - return c.OperatorBox.OnReconcile.StatefulSets != nil - } - - return false -} - -// HasAnyReplicaSets reports whether this CRD defines any ReplicaSets -// in either OnCreate or OnReconcile phases. -func (c *CRDEntry) HasAnyReplicaSets() bool { - if c.HasOnCreate() { - return c.OperatorBox.OnCreate.ReplicaSets != nil - } - if c.HasOnReconcile() { - return c.OperatorBox.OnReconcile.ReplicaSets != nil - } - - return false -} - -// NeedsResourceDecl reports whether this CRD defines any workload resources -// (Deployments, StatefulSets, or ReplicaSets) in either OnCreate or OnReconcile. -func (c *CRDEntry) NeedsResourceDecl() bool { - return c.HasAnyDeployments() || - c.HasAnyReplicaSets() || - c.HasAnyStatefulSets() -} - -// ResourceDecl returns the first ResourceRequirements defined for this CRD. -// It checks OnCreate first, then OnReconcile, and searches Deployments, -// StatefulSets, and ReplicaSets in that order. Returns nil if none exist. 
-func (c *CRDEntry) ResourceDecl() *ResourceRequirements { - // OnCreate phase takes precedence - if c.HasOnCreate() { - if req := findResourceDeclInPhase(c.OperatorBox.OnCreate); req != nil { - return req - } - } - - // OnReconcile fallback - if c.HasOnReconcile() { - if req := findResourceDeclInPhase(c.OperatorBox.OnReconcile); req != nil { - return req - } - } - - return nil -} - -// findResourceDeclInPhase searches Deployments, StatefulSets, and ReplicaSets -// inside a single OperatorPhase and returns the first non-nil ResourceRequirements. -func findResourceDeclInPhase(tmpl *HookTemplates) *ResourceRequirements { - if tmpl == nil { - return nil - } - - // Deployments - if tmpl.Deployments != nil { - for _, d := range tmpl.Deployments { - if d.Resources != nil { - return d.Resources - } - } - } - - // StatefulSets - if tmpl.StatefulSets != nil { - for _, s := range tmpl.StatefulSets { - if s.Resources != nil { - return s.Resources - } - } - } - - // ReplicaSets - if tmpl.ReplicaSets != nil { - for _, r := range tmpl.ReplicaSets { - if r.Resources != nil { - return r.Resources - } - } - } - - return nil -} diff --git a/pkg/types/secret_rotation.go b/pkg/types/secret_rotation.go index b6b9235d..3e832046 100644 --- a/pkg/types/secret_rotation.go +++ b/pkg/types/secret_rotation.go @@ -55,7 +55,7 @@ // Supported: s (seconds), m (minutes), h (hours), d (days), y (years) // Examples: 30s, 5m, 12h, 90d, 1y, 365d // Note: d and y are extensions beyond Go's time.ParseDuration (which stops at h). -// ParseRotationDuration handles d and y by converting to hours. +// ParseTimeDuration handles d and y by converting to hours. // // ── Webhook certificate support ─────────────────────────────────────────── // @@ -119,7 +119,7 @@ type TLSSpec struct { Organization string `yaml:"organization,omitempty" json:"organization,omitempty"` } -// ParseRotationDuration parses a human‑friendly rotation duration string. 
+// ParseTimeDuration parses a human‑friendly rotation duration string. // // It extends Go's time.ParseDuration by adding long‑term units: // @@ -142,7 +142,7 @@ type TLSSpec struct { // - Only "mo" is accepted for months to avoid collision with Go's "m" (minutes). // - Fractional values are supported (e.g., "1.5mo"). // - Falls back to time.ParseDuration for standard units. -func ParseRotationDuration(s string) (time.Duration, error) { +func ParseTimeDuration(s string) (time.Duration, error) { if s == "" { return 0, fmt.Errorf("empty duration") } @@ -203,7 +203,7 @@ func NeedsRotation(generatedAt, rotateAfter string) bool { return true } - threshold, err := ParseRotationDuration(rotateAfter) + threshold, err := ParseTimeDuration(rotateAfter) if err != nil { return false // invalid duration — do not rotate unexpectedly } diff --git a/pkg/types/secret_rotation_test.go b/pkg/types/secret_rotation_test.go index 7c747306..43f770b3 100644 --- a/pkg/types/secret_rotation_test.go +++ b/pkg/types/secret_rotation_test.go @@ -1,4 +1,4 @@ -// Tests for ParseRotationDuration and NeedsRotation (secret_rotation.go). +// Tests for ParseTimeDuration and NeedsRotation (secret_rotation.go). 
package types_test import ( @@ -10,80 +10,80 @@ import ( "github.com/stretchr/testify/require" ) -// ── ParseRotationDuration ───────────────────────────────────────────────────── +// ── ParseTimeDuration ───────────────────────────────────────────────────── -func TestParseRotationDuration_Empty(t *testing.T) { - _, err := orktypes.ParseRotationDuration("") +func TestParseTimeDuration_Empty(t *testing.T) { + _, err := orktypes.ParseTimeDuration("") assert.Error(t, err) } -func TestParseRotationDuration_Seconds(t *testing.T) { - d, err := orktypes.ParseRotationDuration("30s") +func TestParseTimeDuration_Seconds(t *testing.T) { + d, err := orktypes.ParseTimeDuration("30s") require.NoError(t, err) assert.Equal(t, 30*time.Second, d) } -func TestParseRotationDuration_Minutes(t *testing.T) { - d, err := orktypes.ParseRotationDuration("5m") +func TestParseTimeDuration_Minutes(t *testing.T) { + d, err := orktypes.ParseTimeDuration("5m") require.NoError(t, err) assert.Equal(t, 5*time.Minute, d) } -func TestParseRotationDuration_Hours(t *testing.T) { - d, err := orktypes.ParseRotationDuration("12h") +func TestParseTimeDuration_Hours(t *testing.T) { + d, err := orktypes.ParseTimeDuration("12h") require.NoError(t, err) assert.Equal(t, 12*time.Hour, d) } -func TestParseRotationDuration_Days(t *testing.T) { - d, err := orktypes.ParseRotationDuration("10d") +func TestParseTimeDuration_Days(t *testing.T) { + d, err := orktypes.ParseTimeDuration("10d") require.NoError(t, err) assert.Equal(t, 10*24*time.Hour, d) } -func TestParseRotationDuration_Weeks(t *testing.T) { - d, err := orktypes.ParseRotationDuration("2w") +func TestParseTimeDuration_Weeks(t *testing.T) { + d, err := orktypes.ParseTimeDuration("2w") require.NoError(t, err) assert.Equal(t, 14*24*time.Hour, d) } -func TestParseRotationDuration_Months(t *testing.T) { - d, err := orktypes.ParseRotationDuration("3mo") +func TestParseTimeDuration_Months(t *testing.T) { + d, err := orktypes.ParseTimeDuration("3mo") 
require.NoError(t, err) assert.Equal(t, 90*24*time.Hour, d) } -func TestParseRotationDuration_Years(t *testing.T) { - d, err := orktypes.ParseRotationDuration("1y") +func TestParseTimeDuration_Years(t *testing.T) { + d, err := orktypes.ParseTimeDuration("1y") require.NoError(t, err) assert.Equal(t, 365*24*time.Hour, d) } -func TestParseRotationDuration_FractionalYear(t *testing.T) { - d, err := orktypes.ParseRotationDuration("0.5y") +func TestParseTimeDuration_FractionalYear(t *testing.T) { + d, err := orktypes.ParseTimeDuration("0.5y") require.NoError(t, err) // 0.5 * 365 days assert.Equal(t, time.Duration(0.5*float64(365*24*time.Hour)), d) } -func TestParseRotationDuration_FractionalMonths(t *testing.T) { - d, err := orktypes.ParseRotationDuration("1.5mo") +func TestParseTimeDuration_FractionalMonths(t *testing.T) { + d, err := orktypes.ParseTimeDuration("1.5mo") require.NoError(t, err) assert.Equal(t, time.Duration(1.5*float64(30*24*time.Hour)), d) } -func TestParseRotationDuration_InvalidYear(t *testing.T) { - _, err := orktypes.ParseRotationDuration("xy") +func TestParseTimeDuration_InvalidYear(t *testing.T) { + _, err := orktypes.ParseTimeDuration("xy") assert.Error(t, err) } -func TestParseRotationDuration_InvalidDay(t *testing.T) { - _, err := orktypes.ParseRotationDuration("xd") +func TestParseTimeDuration_InvalidDay(t *testing.T) { + _, err := orktypes.ParseTimeDuration("xd") assert.Error(t, err) } -func TestParseRotationDuration_InvalidGoSyntax(t *testing.T) { - _, err := orktypes.ParseRotationDuration("not-a-duration") +func TestParseTimeDuration_InvalidGoSyntax(t *testing.T) { + _, err := orktypes.ParseTimeDuration("not-a-duration") assert.Error(t, err) } diff --git a/pkg/types/types.go b/pkg/types/types.go index a78d5942..5672093d 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -439,6 +439,11 @@ type DeploymentTemplateSource struct { // Useful for Git-backed pipelines where build/test commands must run inside // a checked-out repository 
path. WorkingDirectory string `yaml:"workingDirectory,omitempty" json:"workingDirectory,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── ReplicaSet ──────────────────────────────────────────────────────────────── @@ -570,6 +575,11 @@ type ReplicaSetTemplateSource struct { // WorkingDirectory sets the container's working directory (container.WorkingDir). WorkingDirectory string `yaml:"workingDirectory,omitempty" json:"workingDirectory,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Service ─────────────────────────────────────────────────────────────────── @@ -667,6 +677,11 @@ type ServiceTemplateSource struct { // AnyOf holds OR conditions — at least one must pass for this resource to be created. // Works alongside the existing Conditions (when:) field which uses AND semantics. AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Pod ─────────────────────────────────────────────────────────────────────── @@ -770,6 +785,11 @@ type PodTemplateSource struct { // AnyOf holds OR conditions — at least one must pass for this resource to be created. // Works alongside the existing Conditions (when:) field which uses AND semantics. 
AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Job ─────────────────────────────────────────────────────────────────────── @@ -882,6 +902,11 @@ type JobTemplateSource struct { // Useful for Git-backed pipelines where build/test commands must run inside // a checked-out repository path. WorkingDirectory string `yaml:"workingDirectory,omitempty" json:"workingDirectory,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── CronJob ─────────────────────────────────────────────────────────────────── @@ -989,6 +1014,11 @@ type CronJobTemplateSource struct { // Useful for Git-backed pipelines where build/test commands must run inside // a checked-out repository path. WorkingDirectory string `yaml:"workingDirectory,omitempty" json:"workingDirectory,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── ConfigMap ───────────────────────────────────────────────────────────────── @@ -1071,6 +1101,11 @@ type ConfigMapTemplateSource struct { // AnyOf holds OR conditions — at least one must pass for this resource to be created. // Works alongside the existing Conditions (when:) field which uses AND semantics. 
AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Secret ───────────────────────────────────────────────────────────────────── @@ -1208,6 +1243,11 @@ type SecretTemplateSource struct { // - "{{ .metadata.name }}.{{ .metadata.namespace }}.svc.cluster.local" // validFor: 1y TLS *TLSSpec `yaml:"tls,omitempty" json:"tls,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── ServiceAccount ──────────────────────────────────────────────────────────── @@ -1274,6 +1314,11 @@ type ServiceAccountTemplateSource struct { // AnyOf holds OR conditions — at least one must pass for this resource to be created. // Works alongside the existing Conditions (when:) field which uses AND semantics. AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Namespace ──────────────────────────────────────────────────────────── @@ -1337,6 +1382,11 @@ type NamespaceTemplateSource struct { // AnyOf holds OR conditions — at least one must pass for this resource to be created. // Works alongside the existing Conditions (when:) field which uses AND semantics. 
AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── Ingress ─────────────────────────────────────────────────────────────────── @@ -1401,6 +1451,11 @@ type IngressTemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // IngressTLSSpec configures TLS for an Ingress resource. @@ -1420,6 +1475,11 @@ type IngressTLSSpec struct { // ValidFor — certificate validity duration (e.g. "1y", "90d"). Default: "1y". ValidFor string `yaml:"validFor" json:"validFor,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── HorizontalPodAutoscaler ─────────────────────────────────────────────────── @@ -1474,6 +1534,11 @@ type HPATemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. 
+ // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── PodDisruptionBudget ─────────────────────────────────────────────────────── @@ -1521,6 +1586,11 @@ type PDBTemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // StatefulSetTemplateSource declares one StatefulSet to be managed by Orkestra. @@ -1598,6 +1668,11 @@ type StatefulSetTemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // PVCTemplateSource declares one PersistentVolumeClaim to be managed by Orkestra. @@ -1632,6 +1707,11 @@ type PVCTemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. 
+ // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // PVTemplateSource declares one PersistentVolume to be managed by Orkestra. @@ -1667,6 +1747,99 @@ type PVTemplateSource struct { Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` +} + +// ── Role / RoleBinding ──────────────────────────────────────────────────────── + +// PolicyRuleSpec declares one RBAC policy rule. +// String values within slices support template expressions. +type PolicyRuleSpec struct { + APIGroups []string `yaml:"apiGroups" json:"apiGroups,omitempty"` + Resources []string `yaml:"resources" json:"resources,omitempty"` + Verbs []string `yaml:"verbs" json:"verbs,omitempty"` + ResourceNames []string `yaml:"resourceNames" json:"resourceNames,omitempty"` +} + +// SubjectSpec declares one RBAC subject for a RoleBinding. +// Name and Namespace support template expressions. +type SubjectSpec struct { + Kind string `yaml:"kind" json:"kind,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Namespace string `yaml:"namespace" json:"namespace,omitempty"` +} + +// RoleRefSpec names the Role (or ClusterRole) being bound. +// Name supports template expressions. Kind defaults to "Role". +type RoleRefSpec struct { + Name string `yaml:"name" json:"name,omitempty"` + Kind string `yaml:"kind" json:"kind,omitempty"` // Role | ClusterRole; defaults to Role +} + +// RoleTemplateSource declares one namespaced Role to be managed by Orkestra. 
+// +// Example: +// +// onCreate: +// roles: +// - name: "{{ .metadata.name }}-role" +// namespace: "{{ .metadata.name }}-ns" +// rules: +// - apiGroups: ["apps"] +// resources: ["deployments"] +// verbs: ["get", "list", "watch", "update", "patch"] +// resourceNames: ["{{ .metadata.name }}"] +type RoleTemplateSource struct { + Version string `yaml:"version" json:"version,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Namespace string `yaml:"namespace" json:"namespace,omitempty"` + Labels []ResourceLabel `yaml:"labels" json:"labels,omitempty"` + Rules []PolicyRuleSpec `yaml:"rules" json:"rules,omitempty"` + Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` + Reconcile bool `yaml:"reconcile" json:"reconcile,omitempty"` + ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` +} + +// RoleBindingTemplateSource declares one RoleBinding to be managed by Orkestra. 
+// +// Example: +// +// onCreate: +// roleBindings: +// - name: "{{ .metadata.name }}-rolebinding" +// namespace: "{{ .metadata.name }}-ns" +// roleRef: +// name: "{{ .metadata.name }}-role" +// subjects: +// - kind: ServiceAccount +// name: "{{ .metadata.name }}-sa" +// namespace: "{{ .metadata.name }}-ns" +type RoleBindingTemplateSource struct { + Version string `yaml:"version" json:"version,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Namespace string `yaml:"namespace" json:"namespace,omitempty"` + Labels []ResourceLabel `yaml:"labels" json:"labels,omitempty"` + RoleRef RoleRefSpec `yaml:"roleRef" json:"roleRef,omitempty"` + Subjects []SubjectSpec `yaml:"subjects" json:"subjects,omitempty"` + Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` + Reconcile bool `yaml:"reconcile" json:"reconcile,omitempty"` + ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` + AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` + + // Sleep injects an artificial delay into the reconcile of this resource. + // Useful for autoscale testing, latency simulation, and chaos engineering. + // Accepts extended duration units (s, m, h, d, w, mo, y). + Sleep string `json:"sleep,omitempty" yaml:"sleep,omitempty"` } // ── HookTemplates ───────────────────────────────────────────────────────────── @@ -1716,6 +1889,8 @@ type HookTemplates struct { HorizontalPodAutoscalers []HPATemplateSource `yaml:"hpa" json:"hpa,omitempty" validate:"omitempty"` PodDisruptionBudgets []PDBTemplateSource `yaml:"pdb" json:"pdb,omitempty" validate:"omitempty"` Namespaces []NamespaceTemplateSource `yaml:"namespaces" json:"namespaces,omitempty" validate:"omitempty"` + Roles []RoleTemplateSource `yaml:"roles" json:"roles,omitempty" validate:"omitempty"` + RoleBindings []RoleBindingTemplateSource `yaml:"roleBindings" json:"roleBindings,omitempty" validate:"omitempty"` // External declares HTTP calls to make before resource creation. 
// Results available as .external..status, .body, .error @@ -1759,22 +1934,20 @@ type HookTemplates struct { Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` // TODO with placeholer - Volumes []PlaceholderSource `yaml:"volumes" json:"volumes,omitempty" validate:"omitempty"` - VolumeMounts []PlaceholderSource `yaml:"volumeMounts" json:"volumeMounts,omitempty" validate:"omitempty"` - Roles []RoleTemplateSource `yaml:"roles" json:"roles,omitempty" validate:"omitempty"` - RoleBindings []RoleBindingTemplateSource `yaml:"roleBindings" json:"roleBindings,omitempty" validate:"omitempty"` - ClusterRoles []PlaceholderSource `yaml:"clusterRoles" json:"clusterRoles,omitempty" validate:"omitempty"` - ClusterRoleBindings []PlaceholderSource `yaml:"clusterRoleBindings" json:"clusterRoleBindings,omitempty" validate:"omitempty"` - ServiceMonitors []PlaceholderSource `yaml:"serviceMonitors" json:"serviceMonitors,omitempty" validate:"omitempty"` - PodSecurityPolicies []PlaceholderSource `yaml:"podSecurityPolicies" json:"podSecurityPolicies,omitempty" validate:"omitempty"` - PriorityClasses []PlaceholderSource `yaml:"priorityClasses" json:"priorityClasses,omitempty" validate:"omitempty"` - LimitRanges []PlaceholderSource `yaml:"limitRanges" json:"limitRanges,omitempty" validate:"omitempty"` - ResourceQuotas []PlaceholderSource `yaml:"resourceQuotas" json:"resourceQuotas,omitempty" validate:"omitempty"` - RuntimeClasses []PlaceholderSource `yaml:"runtimeClasses" json:"runtimeClasses,omitempty" validate:"omitempty"` - PriorityLevelConfigurations []PlaceholderSource `yaml:"priorityLevelConfigurations" json:"priorityLevelConfigurations,omitempty" validate:"omitempty"` - PodTemplates []PlaceholderSource `yaml:"podTemplates" json:"podTemplates,omitempty" validate:"omitempty"` - DaemonSets []PlaceholderSource `yaml:"daemonSets" json:"daemonSets,omitempty" validate:"omitempty"` - NetworkPolicies []PlaceholderSource `yaml:"networkPolicies" json:"networkPolicies,omitempty" 
validate:"omitempty"` + Volumes []PlaceholderSource `yaml:"volumes" json:"volumes,omitempty" validate:"omitempty"` + VolumeMounts []PlaceholderSource `yaml:"volumeMounts" json:"volumeMounts,omitempty" validate:"omitempty"` + ClusterRoles []PlaceholderSource `yaml:"clusterRoles" json:"clusterRoles,omitempty" validate:"omitempty"` + ClusterRoleBindings []PlaceholderSource `yaml:"clusterRoleBindings" json:"clusterRoleBindings,omitempty" validate:"omitempty"` + ServiceMonitors []PlaceholderSource `yaml:"serviceMonitors" json:"serviceMonitors,omitempty" validate:"omitempty"` + PodSecurityPolicies []PlaceholderSource `yaml:"podSecurityPolicies" json:"podSecurityPolicies,omitempty" validate:"omitempty"` + PriorityClasses []PlaceholderSource `yaml:"priorityClasses" json:"priorityClasses,omitempty" validate:"omitempty"` + LimitRanges []PlaceholderSource `yaml:"limitRanges" json:"limitRanges,omitempty" validate:"omitempty"` + ResourceQuotas []PlaceholderSource `yaml:"resourceQuotas" json:"resourceQuotas,omitempty" validate:"omitempty"` + RuntimeClasses []PlaceholderSource `yaml:"runtimeClasses" json:"runtimeClasses,omitempty" validate:"omitempty"` + PriorityLevelConfigurations []PlaceholderSource `yaml:"priorityLevelConfigurations" json:"priorityLevelConfigurations,omitempty" validate:"omitempty"` + PodTemplates []PlaceholderSource `yaml:"podTemplates" json:"podTemplates,omitempty" validate:"omitempty"` + DaemonSets []PlaceholderSource `yaml:"daemonSets" json:"daemonSets,omitempty" validate:"omitempty"` + NetworkPolicies []PlaceholderSource `yaml:"networkPolicies" json:"networkPolicies,omitempty" validate:"omitempty"` // Storage StorageClasses []PlaceholderSource `yaml:"storageClasses" json:"storageClasses,omitempty" validate:"omitempty"` @@ -1785,84 +1958,6 @@ type HookTemplates struct { StorageVolumes []PlaceholderSource `yaml:"storageVolumes" json:"storageVolumes,omitempty" validate:"omitempty"` } -// ── Role / RoleBinding 
──────────────────────────────────────────────────────── - -// PolicyRuleSpec declares one RBAC policy rule. -// String values within slices support template expressions. -type PolicyRuleSpec struct { - APIGroups []string `yaml:"apiGroups" json:"apiGroups,omitempty"` - Resources []string `yaml:"resources" json:"resources,omitempty"` - Verbs []string `yaml:"verbs" json:"verbs,omitempty"` - ResourceNames []string `yaml:"resourceNames" json:"resourceNames,omitempty"` -} - -// SubjectSpec declares one RBAC subject for a RoleBinding. -// Name and Namespace support template expressions. -type SubjectSpec struct { - Kind string `yaml:"kind" json:"kind,omitempty"` - Name string `yaml:"name" json:"name,omitempty"` - Namespace string `yaml:"namespace" json:"namespace,omitempty"` -} - -// RoleRefSpec names the Role (or ClusterRole) being bound. -// Name supports template expressions. Kind defaults to "Role". -type RoleRefSpec struct { - Name string `yaml:"name" json:"name,omitempty"` - Kind string `yaml:"kind" json:"kind,omitempty"` // Role | ClusterRole; defaults to Role -} - -// RoleTemplateSource declares one namespaced Role to be managed by Orkestra. 
-// -// Example: -// -// onCreate: -// roles: -// - name: "{{ .metadata.name }}-role" -// namespace: "{{ .metadata.name }}-ns" -// rules: -// - apiGroups: ["apps"] -// resources: ["deployments"] -// verbs: ["get", "list", "watch", "update", "patch"] -// resourceNames: ["{{ .metadata.name }}"] -type RoleTemplateSource struct { - Version string `yaml:"version" json:"version,omitempty"` - Name string `yaml:"name" json:"name,omitempty"` - Namespace string `yaml:"namespace" json:"namespace,omitempty"` - Labels []ResourceLabel `yaml:"labels" json:"labels,omitempty"` - Rules []PolicyRuleSpec `yaml:"rules" json:"rules,omitempty"` - Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` - Reconcile bool `yaml:"reconcile" json:"reconcile,omitempty"` - ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` - AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` -} - -// RoleBindingTemplateSource declares one RoleBinding to be managed by Orkestra. -// -// Example: -// -// onCreate: -// roleBindings: -// - name: "{{ .metadata.name }}-rolebinding" -// namespace: "{{ .metadata.name }}-ns" -// roleRef: -// name: "{{ .metadata.name }}-role" -// subjects: -// - kind: ServiceAccount -// name: "{{ .metadata.name }}-sa" -// namespace: "{{ .metadata.name }}-ns" -type RoleBindingTemplateSource struct { - Version string `yaml:"version" json:"version,omitempty"` - Name string `yaml:"name" json:"name,omitempty"` - Namespace string `yaml:"namespace" json:"namespace,omitempty"` - Labels []ResourceLabel `yaml:"labels" json:"labels,omitempty"` - RoleRef RoleRefSpec `yaml:"roleRef" json:"roleRef,omitempty"` - Subjects []SubjectSpec `yaml:"subjects" json:"subjects,omitempty"` - Conditions []Condition `yaml:"when,omitempty" json:"when,omitempty"` - Reconcile bool `yaml:"reconcile" json:"reconcile,omitempty"` - ForEach *ForEachSpec `yaml:"forEach,omitempty" json:"forEach,omitempty"` - AnyOf []Condition `yaml:"anyOf,omitempty" json:"anyOf,omitempty"` -} - 
// Placeholder for resources yet to be added to orkestra internal registry // pkg/orkestra-registry type PlaceholderSource struct{} diff --git a/website/content/docs/concepts/secret-rotation.md b/website/content/docs/concepts/secret-rotation.md index 8610fcd8..f2108c57 100644 --- a/website/content/docs/concepts/secret-rotation.md +++ b/website/content/docs/concepts/secret-rotation.md @@ -152,6 +152,6 @@ in the Control Center. - `tls:` block (cert generation) — 🔲 Designed, not yet implemented - `webhooks.createCerts: true` — 🔲 Designed, not yet implemented -The type definitions (`TLSSpec`, `ParseRotationDuration`, `NeedsRotation`) +The type definitions (`TLSSpec`, `ParseTimeDuration`, `NeedsRotation`) are in `pkg/types/secret_rotation.go`. Implementation in `run_secrets.go` follows the same check-before-act pattern as `once: true`. \ No newline at end of file diff --git a/website/public/docs/concepts/secret-rotation/index.html b/website/public/docs/concepts/secret-rotation/index.html index 519b3137..cd20db15 100644 --- a/website/public/docs/concepts/secret-rotation/index.html +++ b/website/public/docs/concepts/secret-rotation/index.html @@ -1094,7 +1094,7 @@

Implementation status

  • tls: block (cert generation) — 🔲 Designed, not yet implemented
  • webhooks.createCerts: true — 🔲 Designed, not yet implemented
  • -

    The type definitions (TLSSpec, ParseRotationDuration, NeedsRotation) +

    The type definitions (TLSSpec, ParseTimeDuration, NeedsRotation) are in pkg/types/secret_rotation.go. Implementation in run_secrets.go follows the same check-before-act pattern as once: true.

    From d4fcf9b2a45f981e732d0e80d83f375da8a10fbd Mon Sep 17 00:00:00 2001 From: ialexeze Date: Sun, 10 May 2026 02:22:23 +0000 Subject: [PATCH 2/2] dependency state reevaluation --- .../13-dependencies/01-in-binary/README.md | 37 +++++++++++-------- .../13-dependencies/01-in-binary/cleanup.sh | 0 .../13-dependencies/01-in-binary/katalog.yaml | 10 +++++ .../13-dependencies/02-cross-binary/README.md | 2 +- .../03-cross-cluster/README.md | 2 +- 5 files changed, 34 insertions(+), 17 deletions(-) mode change 100644 => 100755 examples/advanced/13-dependencies/01-in-binary/cleanup.sh diff --git a/examples/advanced/13-dependencies/01-in-binary/README.md b/examples/advanced/13-dependencies/01-in-binary/README.md index 77a02e09..ba6eeb54 100644 --- a/examples/advanced/13-dependencies/01-in-binary/README.md +++ b/examples/advanced/13-dependencies/01-in-binary/README.md @@ -1,10 +1,10 @@ -# 12 — Dependencies · 01: In Binary +# 13 — Dependencies · 01: In Binary `App` will not start reconciling until `Database` is healthy. No init containers, no polling loops, no Go code — just a single line in the Katalog. **What you learn:** `dependsOn`, the three `dependsOn` YAML formats, the `healthy` vs `started` conditions, how the `cross:` block reads the dependency's status for injection, and how Orkestra enforces ordering at the controller level. 
-**Builds on:** [13-01 — Cross Operator In Binary](../../13-cross-operator/01-in-binary/README.md) + --- @@ -68,13 +68,13 @@ kubectl apply -f crd.yaml --- -## Step 3 — Install Orkestra +## Step 3 — Run Orkestra and Control Center ```bash -helm repo add orkestra https://orkspace.github.io/orkestra -helm install orkestra orkestra/orkestra \ - --namespace orkestra-system \ - --wait --timeout 120s +ork run -f katalog.yaml + +# Another terminal +ork control start ``` --- @@ -90,10 +90,13 @@ kubectl get app my-database ``` NAME IMAGE DB ENDPOINT PHASE AGE -my-database nginx:stable-alpine Pending 5s +my-database nginx:stable-alpine 5s ``` -App is pending — Database does not exist yet. Orkestra skips its reconcile without error. +No phase written for App — Database does not exist yet. Orkestra skips its reconcile without error. + +Check the control center at http://localhost:8081; you will see "Dependency Issue" for App. +Select App and scroll down to see why under `"Dependencies"`. --- @@ -119,6 +122,8 @@ app.deps.orkestra.io/my-database nginx:stable-alpine my-database.default.sv Once Database reaches `Running`, Orkestra starts App's reconcile automatically. App picks up the endpoint from the cross block and injects it into its Deployment. +Check the control center and see App become healthy and the phase `Running`. + --- ## Step 6 — Verify the injected env @@ -129,7 +134,10 @@ kubectl get deployment my-database-deployment -o jsonpath='{.spec.template.spec. ```json [ - { "name": "DB_HOST", "value": "my-database.default.svc:5432" } + { + "name": "DB_HOST", + "value": "my-database.default.svc:5432" + } ] ``` @@ -137,14 +145,14 @@ kubectl get deployment my-database-deployment -o jsonpath='{.spec.template.spec.
## Step 7 — Simulate a dependency restart -Delete Database and watch App's behaviour: +Delete Database CRD and watch App's behaviour: ```bash -kubectl delete database my-database -kubectl get app my-database +kubectl delete crd databases.deps.orkestra.io ``` +Check the control center. -Orkestra detects that the dependency is gone and puts App back into `Pending`. Re-apply Database and App resumes within one resync cycle. +Orkestra detects that the dependency is gone and puts App back into `Dependency Issue`. Re-apply Database `(crd.yaml)` and App resumes within one resync cycle. --- @@ -152,5 +160,4 @@ Orkestra detects that the dependency is gone and puts App back into `Pending`. R ```bash chmod +x cleanup.sh && ./cleanup.sh -helm uninstall orkestra -n orkestra-system ``` diff --git a/examples/advanced/13-dependencies/01-in-binary/cleanup.sh b/examples/advanced/13-dependencies/01-in-binary/cleanup.sh old mode 100644 new mode 100755 diff --git a/examples/advanced/13-dependencies/01-in-binary/katalog.yaml b/examples/advanced/13-dependencies/01-in-binary/katalog.yaml index ff4305c1..697dac9c 100644 --- a/examples/advanced/13-dependencies/01-in-binary/katalog.yaml +++ b/examples/advanced/13-dependencies/01-in-binary/katalog.yaml @@ -85,8 +85,17 @@ spec: status: fields: + # ── Initial state ─────────────────────────────────────────── + - path: phase + value: "Pending" + when: + - field: cross.database.status.endpoint + equals: "" - path: phase value: "Running" + when: + - field: cross.database.status.phase + equals: "Running" - path: dbEndpoint value: "{{ .cross.database.status.endpoint }}" @@ -102,3 +111,4 @@ spec: services: - port: "80" targetPort: "8080" + reconcile: true diff --git a/examples/advanced/13-dependencies/02-cross-binary/README.md b/examples/advanced/13-dependencies/02-cross-binary/README.md index 88517055..c918e0dd 100644 --- a/examples/advanced/13-dependencies/02-cross-binary/README.md +++ b/examples/advanced/13-dependencies/02-cross-binary/README.md 
@@ -1,4 +1,4 @@ -# 12 — Dependencies · 02: Cross Binary +# 13 — Dependencies · 02: Cross Binary Same `dependsOn: database: healthy` ordering as `01-in-binary`, but Database runs in a **separate Orkestra deployment** in a hardened namespace. App's Orkestra resolves the dependency condition through the Database Orkestra's HTTP health API. diff --git a/examples/advanced/13-dependencies/03-cross-cluster/README.md b/examples/advanced/13-dependencies/03-cross-cluster/README.md index 5f3c25a7..3fa9cd59 100644 --- a/examples/advanced/13-dependencies/03-cross-cluster/README.md +++ b/examples/advanced/13-dependencies/03-cross-cluster/README.md @@ -1,4 +1,4 @@ -# 12 — Dependencies · 03: Cross Cluster +# 13 — Dependencies · 03: Cross Cluster Database lives in **cluster-a** (infrastructure cluster). App lives in **cluster-b** (application cluster) and will not start until Database in cluster-a is healthy — across a real network boundary.