From 65755aa20ccb6087954c208c5d1b1be38760aa23 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Wed, 21 Jan 2026 10:46:37 +0100
Subject: [PATCH 01/41] Rename impl: field to name:
---
api/v1alpha1/pipeline_types.go | 6 ++-
config/crd/bases/cortex.cloud_pipelines.yaml | 10 +++--
config/crd/cortex.cloud_pipelines.yaml | 10 +++--
.../templates/crd/cortex.cloud_pipelines.yaml | 10 +++--
.../cortex-ironcore/templates/pipelines.yaml | 2 +-
.../cortex-manila/templates/pipelines.yaml | 2 +-
.../cortex-nova/templates/pipelines.yaml | 40 +++++++++----------
.../cortex-pods/templates/pipelines.yaml | 6 +--
.../cinder/pipeline_controller_test.go | 2 +-
.../machines/pipeline_controller_test.go | 4 +-
.../manila/pipeline_controller_test.go | 4 +-
.../nova/pipeline_controller_test.go | 8 ++--
.../pods/pipeline_controller_test.go | 4 +-
.../scheduling/descheduling/nova/monitor.go | 7 ++--
.../descheduling/nova/monitor_test.go | 14 +++----
.../scheduling/descheduling/nova/pipeline.go | 10 ++---
.../nova/pipeline_controller_test.go | 4 +-
.../descheduling/nova/pipeline_test.go | 10 ++---
internal/scheduling/lib/pipeline.go | 12 +++---
.../scheduling/lib/pipeline_controller.go | 2 +-
.../lib/pipeline_controller_test.go | 34 ++++++++--------
internal/scheduling/lib/step_monitor.go | 7 ++--
internal/scheduling/lib/step_validation.go | 2 +-
23 files changed, 108 insertions(+), 102 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index f64ed2008..0e948e487 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -39,14 +39,16 @@ type WeigherSpec struct {
}
type StepSpec struct {
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
// The type of the scheduler step.
Type StepType `json:"type"`
// If the type is "weigher", this contains additional configuration for it.
// +kubebuilder:validation:Optional
Weigher *WeigherSpec `json:"weigher,omitempty"`
- // The name of the scheduler step in the cortex implementation.
- Impl string `json:"impl"`
// Additional configuration for the extractor that can be used
// +kubebuilder:validation:Optional
Opts runtime.RawExtension `json:"opts,omitempty"`
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index c6039f0dc..09cd5c4ec 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -78,9 +78,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- impl:
- description: The name of the scheduler step in the cortex implementation.
- type: string
knowledges:
description: Knowledges this step depends on to be ready.
items:
@@ -133,6 +130,11 @@ spec:
description: Whether this step is mandatory for the pipeline
to be runnable.
type: boolean
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
opts:
description: Additional configuration for the extractor that
can be used
@@ -165,8 +167,8 @@ spec:
type: object
type: object
required:
- - impl
- mandatory
+ - name
- type
type: object
type: array
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index c6039f0dc..09cd5c4ec 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -78,9 +78,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- impl:
- description: The name of the scheduler step in the cortex implementation.
- type: string
knowledges:
description: Knowledges this step depends on to be ready.
items:
@@ -133,6 +130,11 @@ spec:
description: Whether this step is mandatory for the pipeline
to be runnable.
type: boolean
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
opts:
description: Additional configuration for the extractor that
can be used
@@ -165,8 +167,8 @@ spec:
type: object
type: object
required:
- - impl
- mandatory
+ - name
- type
type: object
type: array
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 83075a59c..a8c987651 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -84,9 +84,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- impl:
- description: The name of the scheduler step in the cortex implementation.
- type: string
knowledges:
description: Knowledges this step depends on to be ready.
items:
@@ -139,6 +136,11 @@ spec:
description: Whether this step is mandatory for the pipeline
to be runnable.
type: boolean
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
opts:
description: Additional configuration for the extractor that
can be used
@@ -171,8 +173,8 @@ spec:
type: object
type: object
required:
- - impl
- mandatory
+ - name
- type
type: object
type: array
diff --git a/helm/bundles/cortex-ironcore/templates/pipelines.yaml b/helm/bundles/cortex-ironcore/templates/pipelines.yaml
index 231e95e47..99743fd03 100644
--- a/helm/bundles/cortex-ironcore/templates/pipelines.yaml
+++ b/helm/bundles/cortex-ironcore/templates/pipelines.yaml
@@ -11,7 +11,7 @@ spec:
createDecisions: true
steps:
- type: weigher
- impl: noop
+ name: noop
description: |
This is only a passthrough step which assigns a zero-weight to all machinepool
candidates. It is used as a placeholder step in the ironcore machines scheduler
diff --git a/helm/bundles/cortex-manila/templates/pipelines.yaml b/helm/bundles/cortex-manila/templates/pipelines.yaml
index aba1e5313..8aa32cefb 100644
--- a/helm/bundles/cortex-manila/templates/pipelines.yaml
+++ b/helm/bundles/cortex-manila/templates/pipelines.yaml
@@ -13,7 +13,7 @@ spec:
type: filter-weigher
steps:
- type: weigher
- impl: netapp_cpu_usage_balancing
+ name: netapp_cpu_usage_balancing
description: |
This step uses netapp storage pool cpu metrics condensed into a feature
to balance manila share placements across available storage pools.
diff --git a/helm/bundles/cortex-nova/templates/pipelines.yaml b/helm/bundles/cortex-nova/templates/pipelines.yaml
index c258e836f..f529d961b 100644
--- a/helm/bundles/cortex-nova/templates/pipelines.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines.yaml
@@ -16,7 +16,7 @@ spec:
createDecisions: false
steps:
- type: weigher
- impl: vmware_hana_binpacking
+ name: vmware_hana_binpacking
description: |
This step pulls HANA VMs onto the smallest possible gaps on HANA-exclusive
VMware hosts. In this way hosts with much free space are held free for
@@ -31,7 +31,7 @@ spec:
ramUtilizedAfterActivationUpperBound: 1.0
mandatory: false
- type: weigher
- impl: vmware_general_purpose_balancing
+ name: vmware_general_purpose_balancing
description: |
This step balances non-HANA VMs across non-HANA exclusive VMware hosts. It
pulls vms onto the freeest hosts possible to ensure an even distribution of
@@ -46,7 +46,7 @@ spec:
ramUtilizedActivationUpperBound: 0.0
mandatory: false
- type: weigher
- impl: vmware_avoid_long_term_contended_hosts
+ name: vmware_avoid_long_term_contended_hosts
description: |
This step avoids placing vms on vmware hosts with a high CPU contention over
a longer period of time, based on vrops contention metrics. In particular,
@@ -65,7 +65,7 @@ spec:
maxCPUContentionActivationUpperBound: -0.25
mandatory: false
- type: weigher
- impl: vmware_avoid_short_term_contended_hosts
+ name: vmware_avoid_short_term_contended_hosts
description: |
This step avoids placing vms on vmware hosts with a high CPU contention over
a shorter period of time, based on vrops contention metrics. In particular,
@@ -120,14 +120,14 @@ spec:
{{- end }}
steps:
- type: filter
- impl: filter_host_instructions
+ name: filter_host_instructions
description: |
This step will consider the `ignore_hosts` and `force_hosts` instructions
from the nova scheduler request spec to filter out or exclusively allow
certain hosts.
knowledges: []
- type: filter
- impl: filter_has_enough_capacity
+ name: filter_has_enough_capacity
description: |
This step will filter out hosts that do not have enough available capacity
to host the requested flavor. If enabled, this step will subtract the
@@ -139,35 +139,35 @@ spec:
# and flavor to overlap.
lockReserved: true
- type: filter
- impl: filter_has_requested_traits
+ name: filter_has_requested_traits
description: |
This step filters hosts that do not have the requested traits given by the
nova flavor extra spec: "trait:": "forbidden" means the host must
not have the specified trait. "trait:": "required" means the host
must have the specified trait.
- type: filter
- impl: filter_has_accelerators
+ name: filter_has_accelerators
description: |
This step will filter out hosts without the trait `COMPUTE_ACCELERATORS` if
the nova flavor extra specs request accelerators via "accel:device_profile".
- type: filter
- impl: filter_correct_az
+ name: filter_correct_az
description: |
This step will filter out hosts whose aggregate information indicates they
are not placed in the requested availability zone.
- type: filter
- impl: filter_status_conditions
+ name: filter_status_conditions
description: |
This step will filter out hosts for which the hypervisor status conditions
do not meet the expected values, for example, that the hypervisor is ready
and not disabled.
- type: filter
- impl: filter_maintenance
+ name: filter_maintenance
description: |
This step will filter out hosts that are currently in maintenance mode that
prevents scheduling, for example, manual maintenance or termination.
- type: filter
- impl: filter_external_customer
+ name: filter_external_customer
description: |
This step prefix-matches the domain name for external customer domains and
filters out hosts that are not intended for external customers. It considers
@@ -176,20 +176,20 @@ spec:
opts:
domainNamePrefixes: ["iaas-"]
- type: filter
- impl: filter_packed_virtqueue
+ name: filter_packed_virtqueue
description: |
If the flavor extra specs contain the `hw:virtio_packed_ring` key, or the
image properties contain the `hw_virtio_packed_ring` key, this step will
filter out hosts that do not have the `COMPUTE_NET_VIRTIO_PACKED` trait.
- type: filter
- impl: filter_allowed_projects
+ name: filter_allowed_projects
description: |
This step filters hosts based on allowed projects defined in the
hypervisor resource. Note that hosts allowing all projects are still
accessible and will not be filtered out. In this way some hypervisors
are made accessible to some projects only.
- type: filter
- impl: filter_capabilities
+ name: filter_capabilities
description: |
This step will filter out hosts that do not meet the compute capabilities
requested by the nova flavor extra specs, like `{"arch": "x86_64",
@@ -199,24 +199,24 @@ spec:
like `>`, `!`, ... are not supported because they are not used by any of our
flavors in production.
- type: filter
- impl: filter_instance_group_affinity
+ name: filter_instance_group_affinity
description: |
This step selects hosts in the instance group specified in the nova
scheduler request spec.
- type: filter
- impl: filter_instance_group_anti_affinity
+ name: filter_instance_group_anti_affinity
description: |
This step selects hosts not in the instance group specified in the nova
scheduler request spec, but only until the max_server_per_host limit is
reached (default = 1).
- type: filter
- impl: filter_live_migratable
+ name: filter_live_migratable
description: |
This step ensures that the target host of a live migration can accept
the migrating VM, by checking cpu architecture, cpu features, emulated
devices, and cpu modes.
- type: filter
- impl: filter_requested_destination
+ name: filter_requested_destination
description: |
This step filters hosts based on the `requested_destination` instruction
from the nova scheduler request spec. It supports filtering by host and
@@ -238,7 +238,7 @@ spec:
{{- end }}
steps:
- type: descheduler
- impl: avoid_high_steal_pct
+ name: avoid_high_steal_pct
description: |
This step will deschedule VMs once they reach this CPU steal percentage over
the observed time span.
diff --git a/helm/bundles/cortex-pods/templates/pipelines.yaml b/helm/bundles/cortex-pods/templates/pipelines.yaml
index ed41ca3b2..055e2190d 100644
--- a/helm/bundles/cortex-pods/templates/pipelines.yaml
+++ b/helm/bundles/cortex-pods/templates/pipelines.yaml
@@ -11,21 +11,21 @@ spec:
createDecisions: true
steps:
- type: filter
- impl: noop
+ name: noop
description: |
This is only a passthrough step which lets all pod candidates through.
It is used as a placeholder step in the pods scheduler pipeline.
knowledges: []
mandatory: false
- type: filter
- impl: taint
+ name: taint
description: |
Filters nodes based on taints, excluding nodes with NoSchedule taints
unless the pod has matching tolerations.
knowledges: []
mandatory: true
- type: filter
- impl: nodeaffinity
+ name: nodeaffinity
description: |
Filters nodes based on pod's node affinity requirements, matching
nodes that satisfy the specified label selectors.
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 091958392..e3a867eb7 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -483,7 +483,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-plugin",
+ Name: "test-plugin",
},
},
expectError: true, // Expected because test-plugin is not in supportedSteps
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 821639c6f..87875bba1 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -223,7 +223,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "noop step",
steps: []v1alpha1.StepSpec{
{
- Impl: "noop",
+ Name: "noop",
Type: v1alpha1.StepTypeFilter,
},
},
@@ -233,7 +233,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
steps: []v1alpha1.StepSpec{
{
- Impl: "unsupported",
+ Name: "unsupported",
Type: v1alpha1.StepTypeFilter,
},
},
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index ddfd3fce5..04044911b 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -479,7 +479,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeWeigher,
- Impl: "netapp_cpu_usage_balancing",
+ Name: "netapp_cpu_usage_balancing",
Opts: runtime.RawExtension{
Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
},
@@ -492,7 +492,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "unsupported-plugin",
+ Name: "unsupported-plugin",
},
},
expectError: true,
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index fa8ab962c..6f704ce01 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -274,7 +274,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "filter_status_conditions",
+ Name: "filter_status_conditions",
},
},
expectError: false,
@@ -284,7 +284,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "unsupported-plugin",
+ Name: "unsupported-plugin",
},
},
expectError: true,
@@ -294,7 +294,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "filter_status_conditions",
+ Name: "filter_status_conditions",
Opts: runtime.RawExtension{
Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
},
@@ -307,7 +307,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "filter_status_conditions",
+ Name: "filter_status_conditions",
Opts: runtime.RawExtension{
Raw: []byte(`invalid json`),
},
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 0c57fd1e3..4a46e3a80 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -198,7 +198,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "noop step",
steps: []v1alpha1.StepSpec{
{
- Impl: "noop",
+ Name: "noop",
Type: v1alpha1.StepTypeFilter,
},
},
@@ -208,7 +208,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
steps: []v1alpha1.StepSpec{
{
- Impl: "unsupported",
+ Name: "unsupported",
Type: v1alpha1.StepTypeFilter,
},
},
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index 239c2f921..d1b9ac0da 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -84,18 +84,17 @@ type StepMonitor struct {
// Monitor a step by wrapping it with a StepMonitor.
func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
- name := conf.Impl
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
- runTimer = monitor.stepRunTimer.WithLabelValues(name)
+ runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
}
var descheduledCounter prometheus.Counter
if monitor.stepDeschedulingCounter != nil {
- descheduledCounter = monitor.stepDeschedulingCounter.WithLabelValues(name)
+ descheduledCounter = monitor.stepDeschedulingCounter.WithLabelValues(conf.Name)
}
return StepMonitor{
step: step,
- stepName: name,
+ stepName: conf.Name,
runTimer: runTimer,
descheduledCounter: descheduledCounter,
}
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index 7c665af06..1f8e658de 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -97,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Impl: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index 838620d58..d1c3445cf 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -41,17 +41,17 @@ func (p *Pipeline) Init(
// Load all steps from the configuration.
p.steps = make(map[string]Step, len(confedSteps))
for _, stepConf := range confedSteps {
- step, ok := supportedSteps[stepConf.Impl]
+ step, ok := supportedSteps[stepConf.Name]
if !ok {
- return errors.New("descheduler: unsupported step: " + stepConf.Impl)
+ return errors.New("descheduler: unsupported step: " + stepConf.Name)
}
step = monitorStep(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
return err
}
- p.steps[stepConf.Impl] = step
- p.order = append(p.order, stepConf.Impl)
- slog.Info("descheduler: added step", "name", stepConf.Impl)
+ p.steps[stepConf.Name] = step
+ p.order = append(p.order, stepConf.Name)
+ slog.Info("descheduler: added step", "name", stepConf.Name)
}
return nil
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 78369e5a0..74d741647 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -49,7 +49,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeDescheduler,
- Impl: "mock-step",
+ Name: "mock-step",
},
},
expectError: false,
@@ -60,7 +60,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
{
Type: v1alpha1.StepTypeDescheduler,
- Impl: "unsupported",
+ Name: "unsupported",
},
},
expectError: true,
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index c5e00b9f8..b7a3472c3 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -52,7 +52,7 @@ func TestPipeline_Init(t *testing.T) {
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.StepSpec{{
- Impl: "test-step",
+ Name: "test-step",
Type: v1alpha1.StepTypeDescheduler,
}},
expectedSteps: 1,
@@ -63,7 +63,7 @@ func TestPipeline_Init(t *testing.T) {
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.StepSpec{{
- Impl: "unsupported-step",
+ Name: "unsupported-step",
Type: v1alpha1.StepTypeDescheduler,
}},
expectedError: true,
@@ -74,7 +74,7 @@ func TestPipeline_Init(t *testing.T) {
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
confedSteps: []v1alpha1.StepSpec{{
- Impl: "failing-step",
+ Name: "failing-step",
Type: v1alpha1.StepTypeDescheduler,
}},
expectedError: true,
@@ -87,11 +87,11 @@ func TestPipeline_Init(t *testing.T) {
},
confedSteps: []v1alpha1.StepSpec{
{
- Impl: "step1",
+ Name: "step1",
Type: v1alpha1.StepTypeDescheduler,
},
{
- Impl: "step2",
+ Name: "step2",
Type: v1alpha1.StepTypeDescheduler,
},
},
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 48b05b084..3fde3f226 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -59,11 +59,11 @@ func NewPipeline[RequestType PipelineRequest](
pipelineMonitor := monitor.SubPipeline(name)
for _, stepConfig := range confedSteps {
- slog.Info("scheduler: configuring step", "name", stepConfig.Impl)
+ slog.Info("scheduler: configuring step", "name", stepConfig.Name)
slog.Info("supported:", "steps", maps.Keys(supportedSteps))
- makeStep, ok := supportedSteps[stepConfig.Impl]
+ makeStep, ok := supportedSteps[stepConfig.Name]
if !ok {
- return nil, errors.New("unsupported scheduler step impl: " + stepConfig.Impl)
+ return nil, errors.New("unsupported scheduler step name: " + stepConfig.Name)
}
step := makeStep()
if stepConfig.Type == v1alpha1.StepTypeWeigher && stepConfig.Weigher != nil {
@@ -73,11 +73,11 @@ func NewPipeline[RequestType PipelineRequest](
if err := step.Init(ctx, client, stepConfig); err != nil {
return nil, errors.New("failed to initialize pipeline step: " + err.Error())
}
- stepsByName[stepConfig.Impl] = step
- order = append(order, stepConfig.Impl)
+ stepsByName[stepConfig.Name] = step
+ order = append(order, stepConfig.Name)
slog.Info(
"scheduler: added step",
- "name", stepConfig.Impl,
+ "name", stepConfig.Name,
)
}
return &pipeline[RequestType]{
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 731d4d048..36a46f78e 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -102,7 +102,7 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
Type: v1alpha1.PipelineConditionReady,
Status: metav1.ConditionFalse,
Reason: "MandatoryStepNotReady",
- Message: fmt.Sprintf("mandatory step %s not ready: %s", step.Impl, err.Error()),
+ Message: fmt.Sprintf("mandatory step %s not ready: %s", step.Name, err.Error()),
})
patch := client.MergeFrom(old)
if err := c.Status().Patch(ctx, obj, patch); err != nil {
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 9fb7b9b90..034186cbc 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -200,7 +200,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Mandatory: true,
Knowledges: []corev1.ObjectReference{
{Name: "knowledge-1", Namespace: "default"},
@@ -239,7 +239,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Mandatory: true,
Knowledges: []corev1.ObjectReference{
{Name: "missing-knowledge", Namespace: "default"},
@@ -265,7 +265,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Mandatory: false,
Knowledges: []corev1.ObjectReference{
{Name: "missing-knowledge", Namespace: "default"},
@@ -326,7 +326,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Mandatory: true,
Knowledges: []corev1.ObjectReference{
{Name: "error-knowledge", Namespace: "default"},
@@ -563,7 +563,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with no knowledge dependencies",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{},
},
knowledges: []v1alpha1.Knowledge{},
@@ -573,7 +573,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with ready knowledge",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "ready-knowledge", Namespace: "default"},
},
@@ -595,7 +595,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with knowledge in error state",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "error-knowledge", Namespace: "default"},
},
@@ -622,7 +622,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with knowledge with no data",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "no-data-knowledge", Namespace: "default"},
},
@@ -644,7 +644,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with missing knowledge",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "missing-knowledge", Namespace: "default"},
},
@@ -656,7 +656,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with multiple knowledges, all ready",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "knowledge-1", Namespace: "default"},
{Name: "knowledge-2", Namespace: "default"},
@@ -688,7 +688,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
name: "step with multiple knowledges, some not ready",
step: v1alpha1.StepSpec{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "ready-knowledge", Namespace: "default"},
{Name: "not-ready-knowledge", Namespace: "default"},
@@ -784,7 +784,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -802,7 +802,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "other-knowledge", Namespace: "default"},
},
@@ -836,7 +836,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -914,7 +914,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1066,7 +1066,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1135,7 +1135,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Steps: []v1alpha1.StepSpec{
{
Type: v1alpha1.StepTypeFilter,
- Impl: "test-filter",
+ Name: "test-filter",
Mandatory: true,
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index ed6a79bdf..2e361c1b3 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -57,20 +57,19 @@ func monitorStep[RequestType PipelineRequest](
m PipelineMonitor,
) *StepMonitor[RequestType] {
- stepName := step.Impl
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, stepName)
+ WithLabelValues(m.PipelineName, step.Name)
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, stepName)
+ WithLabelValues(m.PipelineName, step.Name)
}
return &StepMonitor[RequestType]{
Step: impl,
- stepName: stepName,
+ stepName: step.Name,
pipelineName: m.PipelineName,
runTimer: runTimer,
stepSubjectWeight: m.stepSubjectWeight,
diff --git a/internal/scheduling/lib/step_validation.go b/internal/scheduling/lib/step_validation.go
index 638bf6e84..87cfa759a 100644
--- a/internal/scheduling/lib/step_validation.go
+++ b/internal/scheduling/lib/step_validation.go
@@ -24,7 +24,7 @@ type StepValidator[RequestType PipelineRequest] struct {
// Initialize the wrapped step with the database and options.
func (s *StepValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
slog.Info(
- "scheduler: init validation for step", "name", step.Impl,
+ "scheduler: init validation for step", "name", step.Name,
"disabled", s.DisabledValidations,
)
return s.Step.Init(ctx, client, step)
From e1c6f8c1b9574dc9ccca40ec8cd67c05269df44b Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Wed, 21 Jan 2026 16:13:40 +0100
Subject: [PATCH 02/41] Use filters:, weighers:, and detectors: (wip)
---
api/delegation/cinder/messages.go | 16 +-
api/delegation/ironcore/messages.go | 11 +
api/delegation/manila/messages.go | 16 +-
api/delegation/nova/messages.go | 12 +
api/delegation/pods/messages.go | 11 +
api/v1alpha1/pipeline_types.go | 107 +++++---
api/v1alpha1/zz_generated.deepcopy.go | 67 +++--
config/crd/bases/cortex.cloud_pipelines.yaml | 185 ++++++++++---
config/crd/cortex.cloud_pipelines.yaml | 185 ++++++++++---
.../templates/crd/cortex.cloud_pipelines.yaml | 185 ++++++++++---
.../decisions/cinder/pipeline_controller.go | 7 +-
.../cinder/pipeline_controller_test.go | 37 ++-
.../decisions/cinder/supported_steps.go | 8 +-
.../decisions/machines/pipeline_controller.go | 7 +-
.../machines/pipeline_controller_test.go | 28 +-
.../decisions/machines/supported_steps.go | 8 +-
.../decisions/manila/pipeline_controller.go | 7 +-
.../manila/pipeline_controller_test.go | 42 +--
.../decisions/manila/supported_steps.go | 8 +-
.../decisions/nova/pipeline_controller.go | 7 +-
.../nova/pipeline_controller_test.go | 74 +++---
.../decisions/nova/supported_steps.go | 39 +--
.../decisions/pods/pipeline_controller.go | 7 +-
.../pods/pipeline_controller_test.go | 28 +-
.../decisions/pods/supported_steps.go | 11 +-
.../scheduling/descheduling/nova/monitor.go | 6 +-
.../descheduling/nova/monitor_test.go | 16 +-
.../scheduling/descheduling/nova/pipeline.go | 2 +-
.../descheduling/nova/pipeline_controller.go | 2 +-
.../nova/pipeline_controller_test.go | 17 +-
.../descheduling/nova/pipeline_test.go | 27 +-
.../descheduling/nova/plugins/base.go | 6 +-
.../descheduling/nova/plugins/base_test.go | 16 +-
.../nova/plugins/kvm/avoid_high_steal_pct.go | 4 +-
internal/scheduling/descheduling/nova/step.go | 2 +-
internal/scheduling/lib/pipeline.go | 193 +++++++++-----
.../scheduling/lib/pipeline_controller.go | 64 ++---
.../lib/pipeline_controller_test.go | 245 +++++-------------
internal/scheduling/lib/pipeline_test.go | 72 +++--
internal/scheduling/lib/request.go | 5 +
internal/scheduling/lib/request_test.go | 9 +-
internal/scheduling/lib/step.go | 8 +
internal/scheduling/lib/step_validation.go | 68 -----
.../scheduling/lib/step_validation_test.go | 135 ----------
internal/scheduling/lib/weigher_validation.go | 54 ++++
.../scheduling/lib/weigher_validation_test.go | 79 ++++++
46 files changed, 1290 insertions(+), 853 deletions(-)
delete mode 100644 internal/scheduling/lib/step_validation.go
delete mode 100644 internal/scheduling/lib/step_validation_test.go
create mode 100644 internal/scheduling/lib/weigher_validation.go
create mode 100644 internal/scheduling/lib/weigher_validation_test.go
diff --git a/api/delegation/cinder/messages.go b/api/delegation/cinder/messages.go
index b1e9bebbf..5c9b0225f 100644
--- a/api/delegation/cinder/messages.go
+++ b/api/delegation/cinder/messages.go
@@ -3,7 +3,11 @@
package api
-import "log/slog"
+import (
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
// Host object from the Cinder scheduler pipeline.
type ExternalSchedulerHost struct {
@@ -46,6 +50,16 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+ filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
+ for _, host := range r.Hosts {
+ if _, exists := includedSubjects[host.VolumeHost]; exists {
+ filteredHosts = append(filteredHosts, host)
+ }
+ }
+ r.Hosts = filteredHosts
+ return r
+}
// Response generated by cortex for the Cinder scheduler.
// Cortex returns an ordered list of hosts that the share should be scheduled on.
diff --git a/api/delegation/ironcore/messages.go b/api/delegation/ironcore/messages.go
index 37c1bf8c7..61d90b097 100644
--- a/api/delegation/ironcore/messages.go
+++ b/api/delegation/ironcore/messages.go
@@ -7,6 +7,7 @@ import (
"log/slog"
ironcorev1alpha1 "github.com/cobaltcore-dev/cortex/api/delegation/ironcore/v1alpha1"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
type MachinePipelineRequest struct {
@@ -31,3 +32,13 @@ func (r MachinePipelineRequest) GetWeights() map[string]float64 {
func (r MachinePipelineRequest) GetTraceLogArgs() []slog.Attr {
return []slog.Attr{}
}
+func (r MachinePipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+ filteredPools := make([]ironcorev1alpha1.MachinePool, 0, len(includedSubjects))
+ for _, pool := range r.Pools {
+ if _, exists := includedSubjects[pool.Name]; exists {
+ filteredPools = append(filteredPools, pool)
+ }
+ }
+ r.Pools = filteredPools
+ return r
+}
diff --git a/api/delegation/manila/messages.go b/api/delegation/manila/messages.go
index b1b4eb26e..9b8afbaed 100644
--- a/api/delegation/manila/messages.go
+++ b/api/delegation/manila/messages.go
@@ -3,7 +3,11 @@
package api
-import "log/slog"
+import (
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
// Host object from the Manila scheduler pipeline.
type ExternalSchedulerHost struct {
@@ -46,6 +50,16 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+ filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
+ for _, host := range r.Hosts {
+ if _, exists := includedSubjects[host.ShareHost]; exists {
+ filteredHosts = append(filteredHosts, host)
+ }
+ }
+ r.Hosts = filteredHosts
+ return r
+}
// Response generated by cortex for the Manila scheduler.
// Cortex returns an ordered list of hosts that the share should be scheduled on.
diff --git a/api/delegation/nova/messages.go b/api/delegation/nova/messages.go
index ff69ca2e9..117071a8f 100644
--- a/api/delegation/nova/messages.go
+++ b/api/delegation/nova/messages.go
@@ -7,6 +7,8 @@ import (
"errors"
"fmt"
"log/slog"
+
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
// Host object from the Nova scheduler pipeline.
@@ -69,6 +71,16 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+ filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
+ for _, host := range r.Hosts {
+ if _, exists := includedSubjects[host.ComputeHost]; exists {
+ filteredHosts = append(filteredHosts, host)
+ }
+ }
+ r.Hosts = filteredHosts
+ return r
+}
// Response generated by cortex for the Nova scheduler.
// Cortex returns an ordered list of hosts that the VM should be scheduled on.
diff --git a/api/delegation/pods/messages.go b/api/delegation/pods/messages.go
index a3c9fd956..c1ae8fe5c 100644
--- a/api/delegation/pods/messages.go
+++ b/api/delegation/pods/messages.go
@@ -6,6 +6,7 @@ package pods
import (
"log/slog"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
corev1 "k8s.io/api/core/v1"
)
@@ -33,3 +34,13 @@ func (r PodPipelineRequest) GetWeights() map[string]float64 {
func (r PodPipelineRequest) GetTraceLogArgs() []slog.Attr {
return []slog.Attr{}
}
+func (r PodPipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+ filteredNodes := make([]corev1.Node, 0, len(includedSubjects))
+ for _, node := range r.Nodes {
+ if _, exists := includedSubjects[node.Name]; exists {
+ filteredNodes = append(filteredNodes, node)
+ }
+ }
+ r.Nodes = filteredNodes
+ return r
+}
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 0e948e487..cf965111f 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -9,33 +9,42 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
-type DisabledValidationsSpec struct {
- // Whether to validate that no subjects are removed or added from the scheduler
- // step. This should only be disabled for scheduler steps that remove subjects.
- // Thus, if no value is provided, the default is false.
- SameSubjectNumberInOut bool `json:"sameSubjectNumberInOut,omitempty"`
- // Whether to validate that, after running the step, there are remaining subjects.
- // This should only be disabled for scheduler steps that are expected to
- // remove all subjects.
- SomeSubjectsRemain bool `json:"someSubjectsRemain,omitempty"`
+// Filters remove host candidates from an initial set, leaving
+// valid candidates. Filters are run before weighers are applied, as
+// part of a filter-weigher scheduling pipeline.
+type FilterSpec struct {
+ StepSpec `json:",inline"` // Embed common step spec fields.
+
+ // Filters are not allowed to depend on knowledges, as knowledges can
+	// be outdated, leading to invalid filtering decisions.
}
-type StepType string
+// Weighers assign weights to the remaining host candidates after filtering,
+// making some hosts more preferable than others. Weighers are run
+// after filters are applied, as part of a filter-weigher scheduling pipeline.
+type WeigherSpec struct {
+ StepSpec `json:",inline"` // Embed common step spec fields.
-const (
- // Step for assigning weights to hosts.
- StepTypeWeigher StepType = "weigher"
- // Step for filtering hosts.
- StepTypeFilter StepType = "filter"
- // Step for generating descheduling recommendations.
- StepTypeDescheduler StepType = "descheduler"
-)
+ // Knowledges this step depends on to be ready.
+ //
+ // Weighers can depend on knowledges as they don't break valid placements,
+	// they only make them more optimal.
+ // +kubebuilder:validation:Optional
+ Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
+}
-type WeigherSpec struct {
- // The validations to disable for this step. If none are provided, all
- // applied validations are enabled.
+// Detectors find candidates for descheduling (migration off current host).
+// These detectors are run after weighers are applied, as part of a
+// descheduler scheduling pipeline.
+type DetectorSpec struct {
+ StepSpec `json:",inline"` // Embed common step spec fields.
+
+ // Knowledges this step depends on to be ready.
+ //
+ // Detectors can depend on knowledges as they don't ensure valid placements
+ // and therefore are not on the critical path.
// +kubebuilder:validation:Optional
- DisabledValidations DisabledValidationsSpec `json:"disabledValidations,omitempty"`
+ Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
type StepSpec struct {
@@ -43,26 +52,14 @@ type StepSpec struct {
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
- // The type of the scheduler step.
- Type StepType `json:"type"`
- // If the type is "weigher", this contains additional configuration for it.
- // +kubebuilder:validation:Optional
- Weigher *WeigherSpec `json:"weigher,omitempty"`
-
// Additional configuration for the extractor that can be used
// +kubebuilder:validation:Optional
Opts runtime.RawExtension `json:"opts,omitempty"`
- // Knowledges this step depends on to be ready.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
+
// Additional description of the step which helps understand its purpose
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
-
- // Whether this step is mandatory for the pipeline to be runnable.
- // +kubebuilder:default=true
- Mandatory bool `json:"mandatory"`
}
type PipelineType string
@@ -80,17 +77,49 @@ type PipelineSpec struct {
// SchedulingDomain defines in which scheduling domain this pipeline
// is used (e.g., nova, cinder, manila).
SchedulingDomain SchedulingDomain `json:"schedulingDomain"`
- // An optional description of the pipeline.
+
+ // An optional description of the pipeline, helping understand its purpose.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
+
// If this pipeline should create decision objects.
// When this is false, the pipeline will still process requests.
// +kubebuilder:default=false
CreateDecisions bool `json:"createDecisions,omitempty"`
- // The type of the pipeline.
+
+ // The type of the pipeline, used to differentiate between
+ // filter-weigher and descheduler pipelines within the same
+ // scheduling domain.
+ //
+	// If the type is filter-weigher, the filters and weighers attributes
+ // must be set. If the type is descheduler, the detectors attribute
+ // must be set.
+ //
+ // +kubebuilder:validation:Enum=filter-weigher;descheduler
Type PipelineType `json:"type"`
- // The ordered list of steps that make up this pipeline.
- Steps []StepSpec `json:"steps,omitempty"`
+
+ // Ordered list of filters to apply in a scheduling pipeline.
+ //
+ // This attribute is set only if the pipeline type is filter-weigher.
+ // Filters remove host candidates from an initial set, leaving
+ // valid candidates. Filters are run before weighers are applied.
+ // +kubebuilder:validation:Optional
+ Filters []FilterSpec `json:"filters,omitempty"`
+
+ // Ordered list of weighers to apply in a scheduling pipeline.
+ //
+ // This attribute is set only if the pipeline type is filter-weigher.
+ // These weighers are run after filters are applied.
+ // +kubebuilder:validation:Optional
+ Weighers []WeigherSpec `json:"weighers,omitempty"`
+
+ // Ordered list of detectors to apply in a descheduling pipeline.
+ //
+ // This attribute is set only if the pipeline type is descheduler.
+ // Detectors find candidates for descheduling (migration off current host).
+ // These detectors are run after weighers are applied.
+ // +kubebuilder:validation:Optional
+ Detectors []DetectorSpec `json:"detectors,omitempty"`
}
const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index a9ac69e2f..b75142551 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -426,16 +426,38 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DisabledValidationsSpec) DeepCopyInto(out *DisabledValidationsSpec) {
+func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
*out = *in
+ in.StepSpec.DeepCopyInto(&out.StepSpec)
+ if in.Knowledges != nil {
+ in, out := &in.Knowledges, &out.Knowledges
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec.
+func (in *DetectorSpec) DeepCopy() *DetectorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DetectorSpec)
+ in.DeepCopyInto(out)
+ return out
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledValidationsSpec.
-func (in *DisabledValidationsSpec) DeepCopy() *DisabledValidationsSpec {
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
+ *out = *in
+ in.StepSpec.DeepCopyInto(&out.StepSpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
+func (in *FilterSpec) DeepCopy() *FilterSpec {
if in == nil {
return nil
}
- out := new(DisabledValidationsSpec)
+ out := new(FilterSpec)
in.DeepCopyInto(out)
return out
}
@@ -855,9 +877,23 @@ func (in *PipelineList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
- if in.Steps != nil {
- in, out := &in.Steps, &out.Steps
- *out = make([]StepSpec, len(*in))
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]FilterSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Weighers != nil {
+ in, out := &in.Weighers, &out.Weighers
+ *out = make([]WeigherSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Detectors != nil {
+ in, out := &in.Detectors, &out.Detectors
+ *out = make([]DetectorSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1101,17 +1137,7 @@ func (in *StepResult) DeepCopy() *StepResult {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
- if in.Weigher != nil {
- in, out := &in.Weigher, &out.Weigher
- *out = new(WeigherSpec)
- **out = **in
- }
in.Opts.DeepCopyInto(&out.Opts)
- if in.Knowledges != nil {
- in, out := &in.Knowledges, &out.Knowledges
- *out = make([]v1.ObjectReference, len(*in))
- copy(*out, *in)
- }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
@@ -1127,7 +1153,12 @@ func (in *StepSpec) DeepCopy() *StepSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
*out = *in
- out.DisabledValidations = in.DisabledValidations
+ in.StepSpec.DeepCopyInto(&out.StepSpec)
+ if in.Knowledges != nil {
+ in, out := &in.Knowledges, &out.Knowledges
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherSpec.
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 09cd5c4ec..794acf0b1 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -62,16 +62,21 @@ spec:
When this is false, the pipeline will still process requests.
type: boolean
description:
- description: An optional description of the pipeline.
+ description: An optional description of the pipeline, helping understand
+ its purpose.
type: string
- schedulingDomain:
+ detectors:
description: |-
- SchedulingDomain defines in which scheduling domain this pipeline
- is used (e.g., nova, cinder, manila).
- type: string
- steps:
- description: The ordered list of steps that make up this pipeline.
+ Ordered list of detectors to apply in a descheduling pipeline.
+
+ This attribute is set only if the pipeline type is descheduler.
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -79,7 +84,11 @@ spec:
and decisions made by it.
type: string
knowledges:
- description: Knowledges this step depends on to be ready.
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -125,11 +134,6 @@ spec:
type: object
x-kubernetes-map-type: atomic
type: array
- mandatory:
- default: true
- description: Whether this step is mandatory for the pipeline
- to be runnable.
- type: boolean
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -140,41 +144,142 @@ spec:
can be used
type: object
x-kubernetes-preserve-unknown-fields: true
- type:
- description: The type of the scheduler step.
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: |-
+ Ordered list of filters to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied.
+ items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
type: string
- weigher:
- description: If the type is "weigher", this contains additional
- configuration for it.
- properties:
- disabledValidations:
- description: |-
- The validations to disable for this step. If none are provided, all
- applied validations are enabled.
- properties:
- sameSubjectNumberInOut:
- description: |-
- Whether to validate that no subjects are removed or added from the scheduler
- step. This should only be disabled for scheduler steps that remove subjects.
- Thus, if no value is provided, the default is false.
- type: boolean
- someSubjectsRemain:
- description: |-
- Whether to validate that, after running the step, there are remaining subjects.
- This should only be disabled for scheduler steps that are expected to
- remove all subjects.
- type: boolean
- type: object
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
type: object
+ x-kubernetes-preserve-unknown-fields: true
required:
- - mandatory
- name
- - type
type: object
type: array
+ schedulingDomain:
+ description: |-
+ SchedulingDomain defines in which scheduling domain this pipeline
+ is used (e.g., nova, cinder, manila).
+ type: string
type:
- description: The type of the pipeline.
+ description: |-
+ The type of the pipeline, used to differentiate between
+ filter-weigher and descheduler pipelines within the same
+ scheduling domain.
+
+            If the type is filter-weigher, the filters and weighers attributes
+ must be set. If the type is descheduler, the detectors attribute
+ must be set.
+ enum:
+ - filter-weigher
+ - descheduler
type: string
+ weighers:
+ description: |-
+ Ordered list of weighers to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ These weighers are run after filters are applied.
+ items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
+ type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+                they only make them more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - name
+ type: object
+ type: array
required:
- schedulingDomain
- type
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 09cd5c4ec..794acf0b1 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -62,16 +62,21 @@ spec:
When this is false, the pipeline will still process requests.
type: boolean
description:
- description: An optional description of the pipeline.
+ description: An optional description of the pipeline, helping understand
+ its purpose.
type: string
- schedulingDomain:
+ detectors:
description: |-
- SchedulingDomain defines in which scheduling domain this pipeline
- is used (e.g., nova, cinder, manila).
- type: string
- steps:
- description: The ordered list of steps that make up this pipeline.
+ Ordered list of detectors to apply in a descheduling pipeline.
+
+ This attribute is set only if the pipeline type is descheduler.
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -79,7 +84,11 @@ spec:
and decisions made by it.
type: string
knowledges:
- description: Knowledges this step depends on to be ready.
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -125,11 +134,6 @@ spec:
type: object
x-kubernetes-map-type: atomic
type: array
- mandatory:
- default: true
- description: Whether this step is mandatory for the pipeline
- to be runnable.
- type: boolean
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -140,41 +144,142 @@ spec:
can be used
type: object
x-kubernetes-preserve-unknown-fields: true
- type:
- description: The type of the scheduler step.
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: |-
+ Ordered list of filters to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied.
+ items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
type: string
- weigher:
- description: If the type is "weigher", this contains additional
- configuration for it.
- properties:
- disabledValidations:
- description: |-
- The validations to disable for this step. If none are provided, all
- applied validations are enabled.
- properties:
- sameSubjectNumberInOut:
- description: |-
- Whether to validate that no subjects are removed or added from the scheduler
- step. This should only be disabled for scheduler steps that remove subjects.
- Thus, if no value is provided, the default is false.
- type: boolean
- someSubjectsRemain:
- description: |-
- Whether to validate that, after running the step, there are remaining subjects.
- This should only be disabled for scheduler steps that are expected to
- remove all subjects.
- type: boolean
- type: object
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
type: object
+ x-kubernetes-preserve-unknown-fields: true
required:
- - mandatory
- name
- - type
type: object
type: array
+ schedulingDomain:
+ description: |-
+ SchedulingDomain defines in which scheduling domain this pipeline
+ is used (e.g., nova, cinder, manila).
+ type: string
type:
- description: The type of the pipeline.
+ description: |-
+ The type of the pipeline, used to differentiate between
+ filter-weigher and descheduler pipelines within the same
+ scheduling domain.
+
+            If the type is filter-weigher, the filters and weighers attributes
+ must be set. If the type is descheduler, the detectors attribute
+ must be set.
+ enum:
+ - filter-weigher
+ - descheduler
type: string
+ weighers:
+ description: |-
+ Ordered list of weighers to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ These weighers are run after filters are applied.
+ items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
+ type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+                they only make them more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - name
+ type: object
+ type: array
required:
- schedulingDomain
- type
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index a8c987651..679bab0fb 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -68,16 +68,21 @@ spec:
When this is false, the pipeline will still process requests.
type: boolean
description:
- description: An optional description of the pipeline.
+ description: An optional description of the pipeline, helping understand
+ its purpose.
type: string
- schedulingDomain:
+ detectors:
description: |-
- SchedulingDomain defines in which scheduling domain this pipeline
- is used (e.g., nova, cinder, manila).
- type: string
- steps:
- description: The ordered list of steps that make up this pipeline.
+ Ordered list of detectors to apply in a descheduling pipeline.
+
+ This attribute is set only if the pipeline type is descheduler.
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -85,7 +90,11 @@ spec:
and decisions made by it.
type: string
knowledges:
- description: Knowledges this step depends on to be ready.
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -131,11 +140,6 @@ spec:
type: object
x-kubernetes-map-type: atomic
type: array
- mandatory:
- default: true
- description: Whether this step is mandatory for the pipeline
- to be runnable.
- type: boolean
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -146,41 +150,142 @@ spec:
can be used
type: object
x-kubernetes-preserve-unknown-fields: true
- type:
- description: The type of the scheduler step.
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: |-
+ Ordered list of filters to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied.
+ items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
type: string
- weigher:
- description: If the type is "weigher", this contains additional
- configuration for it.
- properties:
- disabledValidations:
- description: |-
- The validations to disable for this step. If none are provided, all
- applied validations are enabled.
- properties:
- sameSubjectNumberInOut:
- description: |-
- Whether to validate that no subjects are removed or added from the scheduler
- step. This should only be disabled for scheduler steps that remove subjects.
- Thus, if no value is provided, the default is false.
- type: boolean
- someSubjectsRemain:
- description: |-
- Whether to validate that, after running the step, there are remaining subjects.
- This should only be disabled for scheduler steps that are expected to
- remove all subjects.
- type: boolean
- type: object
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
type: object
+ x-kubernetes-preserve-unknown-fields: true
required:
- - mandatory
- name
- - type
type: object
type: array
+ schedulingDomain:
+ description: |-
+ SchedulingDomain defines in which scheduling domain this pipeline
+ is used (e.g., nova, cinder, manila).
+ type: string
type:
- description: The type of the pipeline.
+ description: |-
+ The type of the pipeline, used to differentiate between
+ filter-weigher and descheduler pipelines within the same
+ scheduling domain.
+
+ If the type is filter-weigher, the filter and weigher attributes
+ must be set. If the type is descheduler, the detectors attribute
+ must be set.
+ enum:
+ - filter-weigher
+ - descheduler
type: string
+ weighers:
+ description: |-
+ Ordered list of weighers to apply in a scheduling pipeline.
+
+ This attribute is set only if the pipeline type is filter-weigher.
+ These weighers are run after filters are applied.
+ items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
+ properties:
+ description:
+ description: |-
+ Additional description of the step which helps understand its purpose
+ and decisions made by it.
+ type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+ they only make it more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ name:
+ description: |-
+ The name of the scheduler step in the cortex implementation.
+ Must match to a step implemented by the pipeline controller.
+ type: string
+ opts:
+ description: Additional configuration for the extractor that
+ can be used
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - name
+ type: object
+ type: array
required:
- schedulingDomain
- type
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller.go b/internal/scheduling/decisions/cinder/pipeline_controller.go
index 41d7d378e..eb3debc65 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller.go
@@ -146,7 +146,12 @@ func (c *DecisionPipelineController) InitPipeline(
p v1alpha1.Pipeline,
) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
- return lib.NewPipeline(ctx, c.Client, p.Name, supportedSteps, p.Spec.Steps, c.Monitor)
+ return lib.NewFilterWeigherPipeline(
+ ctx, c.Client, p.Name,
+ supportedFilters, p.Spec.Filters,
+ supportedWeighers, p.Spec.Weighers,
+ c.Monitor,
+ )
}
func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index e3a867eb7..a8fbc9598 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -84,7 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -112,7 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -173,7 +175,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
})
if err != nil {
@@ -281,7 +284,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -314,7 +318,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -367,7 +372,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -470,20 +476,26 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-plugin",
+ StepSpec: v1alpha1.StepSpec{Name: "test-plugin"},
+ },
+ },
+ weighers: []v1alpha1.WeigherSpec{
+ {
+ StepSpec: v1alpha1.StepSpec{Name: "test-plugin"},
},
},
expectError: true, // Expected because test-plugin is not in supportedSteps
@@ -499,7 +511,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Steps: tt.steps,
+ Filters: tt.filters,
+ Weighers: tt.weighers,
},
})
diff --git a/internal/scheduling/decisions/cinder/supported_steps.go b/internal/scheduling/decisions/cinder/supported_steps.go
index 307d44a4d..c10ed46a4 100644
--- a/internal/scheduling/decisions/cinder/supported_steps.go
+++ b/internal/scheduling/decisions/cinder/supported_steps.go
@@ -10,6 +10,8 @@ import (
type CinderStep = lib.Step[api.ExternalSchedulerRequest]
-// Configuration of steps supported by the scheduler.
-// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]func() CinderStep{}
+// Configuration of weighers supported by the cinder scheduling.
+var supportedWeighers = map[string]func() CinderStep{}
+
+// Configuration of filters supported by the cinder scheduling.
+var supportedFilters = map[string]func() CinderStep{}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller.go b/internal/scheduling/decisions/machines/pipeline_controller.go
index 8da6ed3dc..90cffbf01 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller.go
@@ -188,7 +188,12 @@ func (c *DecisionPipelineController) InitPipeline(
p v1alpha1.Pipeline,
) (lib.Pipeline[ironcore.MachinePipelineRequest], error) {
- return lib.NewPipeline(ctx, c.Client, p.Name, supportedSteps, p.Spec.Steps, c.Monitor)
+ return lib.NewFilterWeigherPipeline(
+ ctx, c.Client, p.Name,
+ supportedFilters, p.Spec.Filters,
+ supportedWeighers, p.Spec.Weighers,
+ c.Monitor,
+ )
}
func (c *DecisionPipelineController) handleMachine() handler.EventHandler {
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 87875bba1..0a2f8b00b 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -211,30 +211,30 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "noop step",
- steps: []v1alpha1.StepSpec{
+ weighers: []v1alpha1.WeigherSpec{
{
- Name: "noop",
- Type: v1alpha1.StepTypeFilter,
+ StepSpec: v1alpha1.StepSpec{Name: "noop"},
},
},
expectError: false,
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Name: "unsupported",
- Type: v1alpha1.StepTypeFilter,
+ StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
},
},
expectError: true,
@@ -250,7 +250,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
- Steps: tt.steps,
+ Filters: tt.filters,
+ Weighers: tt.weighers,
},
})
@@ -318,7 +319,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -351,7 +353,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -397,7 +400,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/machines/supported_steps.go b/internal/scheduling/decisions/machines/supported_steps.go
index cfa9b13eb..9400ee922 100644
--- a/internal/scheduling/decisions/machines/supported_steps.go
+++ b/internal/scheduling/decisions/machines/supported_steps.go
@@ -10,8 +10,10 @@ import (
type MachineStep = lib.Step[ironcore.MachinePipelineRequest]
-// Configuration of steps supported by the scheduling.
-// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]func() MachineStep{
+// Configuration of weighers supported by the machine scheduling.
+var supportedWeighers = map[string]func() MachineStep{
"noop": func() MachineStep { return &NoopFilter{} },
}
+
+// Configuration of filters supported by the machine scheduling.
+var supportedFilters = map[string]func() MachineStep{}
diff --git a/internal/scheduling/decisions/manila/pipeline_controller.go b/internal/scheduling/decisions/manila/pipeline_controller.go
index ac153f2d3..cba5974ea 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller.go
@@ -146,7 +146,12 @@ func (c *DecisionPipelineController) InitPipeline(
p v1alpha1.Pipeline,
) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
- return lib.NewPipeline(ctx, c.Client, p.Name, supportedSteps, p.Spec.Steps, c.Monitor)
+ return lib.NewFilterWeigherPipeline(
+ ctx, c.Client, p.Name,
+ supportedFilters, p.Spec.Filters,
+ supportedWeighers, p.Spec.Weighers,
+ c.Monitor,
+ )
}
func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index 04044911b..b404e0a89 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -84,7 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -112,7 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -277,7 +279,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -310,7 +313,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -363,7 +367,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -466,22 +471,25 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "supported netapp step",
- steps: []v1alpha1.StepSpec{
+ weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeWeigher,
- Name: "netapp_cpu_usage_balancing",
- Opts: runtime.RawExtension{
- Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
+ StepSpec: v1alpha1.StepSpec{
+ Name: "netapp_cpu_usage_balancing",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
+ },
},
},
},
@@ -489,10 +497,11 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "unsupported-plugin",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "unsupported-plugin",
+ },
},
},
expectError: true,
@@ -508,7 +517,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Steps: tt.steps,
+ Filters: tt.filters,
+ Weighers: tt.weighers,
},
})
diff --git a/internal/scheduling/decisions/manila/supported_steps.go b/internal/scheduling/decisions/manila/supported_steps.go
index a9ec7ebd1..4530db6a7 100644
--- a/internal/scheduling/decisions/manila/supported_steps.go
+++ b/internal/scheduling/decisions/manila/supported_steps.go
@@ -11,8 +11,10 @@ import (
type ManilaStep = lib.Step[api.ExternalSchedulerRequest]
-// Configuration of steps supported by the scheduling.
-// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]func() ManilaStep{
+// Configuration of filters supported by the manila scheduler.
+var supportedFilters = map[string]func() ManilaStep{}
+
+// Configuration of weighers supported by the manila scheduler.
+var supportedWeighers = map[string]func() ManilaStep{
"netapp_cpu_usage_balancing": func() ManilaStep { return &weighers.NetappCPUUsageBalancingStep{} },
}
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go
index da088a6e8..dfbd7a249 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller.go
@@ -153,7 +153,12 @@ func (c *DecisionPipelineController) InitPipeline(
p v1alpha1.Pipeline,
) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
- return lib.NewPipeline(ctx, c.Client, p.Name, supportedSteps, p.Spec.Steps, c.Monitor)
+ return lib.NewFilterWeigherPipeline(
+ ctx, c.Client, p.Name,
+ supportedFilters, p.Spec.Filters,
+ supportedWeighers, p.Spec.Weighers,
+ c.Monitor,
+ )
}
func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 6f704ce01..dbcf3929b 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -92,7 +92,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -120,7 +121,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -171,7 +173,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -261,42 +264,43 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "supported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "filter_status_conditions",
+ StepSpec: v1alpha1.StepSpec{Name: "filter_status_conditions"},
},
},
expectError: false,
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "unsupported-plugin",
+ StepSpec: v1alpha1.StepSpec{Name: "unsupported-plugin"},
},
},
expectError: true,
},
{
name: "step with scoping options",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
- Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
+ StepSpec: v1alpha1.StepSpec{
+ Name: "filter_status_conditions",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
+ },
},
},
},
@@ -304,12 +308,13 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with invalid scoping options",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
- Raw: []byte(`invalid json`),
+ StepSpec: v1alpha1.StepSpec{
+ Name: "filter_status_conditions",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`invalid json`),
+ },
},
},
},
@@ -324,7 +329,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "test-pipeline",
},
Spec: v1alpha1.PipelineSpec{
- Steps: tt.steps,
+ Filters: tt.filters,
+ Weighers: tt.weighers,
},
})
@@ -416,7 +422,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -427,7 +434,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -462,7 +470,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -473,7 +482,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -532,7 +542,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -543,7 +554,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -580,7 +592,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -617,7 +630,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_steps.go
index 3a223f5fc..45699744c 100644
--- a/internal/scheduling/decisions/nova/supported_steps.go
+++ b/internal/scheduling/decisions/nova/supported_steps.go
@@ -12,27 +12,30 @@ import (
type NovaStep = lib.Step[api.ExternalSchedulerRequest]
-// Configuration of steps supported by the scheduling.
-// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]func() NovaStep{
+// Configuration of filters supported by the nova scheduler.
+var supportedFilters = map[string]func() NovaStep{
+ "filter_has_accelerators": func() NovaStep { return &filters.FilterHasAcceleratorsStep{} },
+ "filter_correct_az": func() NovaStep { return &filters.FilterCorrectAZStep{} },
+ "filter_status_conditions": func() NovaStep { return &filters.FilterStatusConditionsStep{} },
+ "filter_maintenance": func() NovaStep { return &filters.FilterMaintenanceStep{} },
+ "filter_packed_virtqueue": func() NovaStep { return &filters.FilterPackedVirtqueueStep{} },
+ "filter_external_customer": func() NovaStep { return &filters.FilterExternalCustomerStep{} },
+ "filter_allowed_projects": func() NovaStep { return &filters.FilterAllowedProjectsStep{} },
+ "filter_capabilities": func() NovaStep { return &filters.FilterCapabilitiesStep{} },
+ "filter_has_requested_traits": func() NovaStep { return &filters.FilterHasRequestedTraits{} },
+ "filter_has_enough_capacity": func() NovaStep { return &filters.FilterHasEnoughCapacity{} },
+ "filter_host_instructions": func() NovaStep { return &filters.FilterHostInstructionsStep{} },
+ "filter_instance_group_affinity": func() NovaStep { return &filters.FilterInstanceGroupAffinityStep{} },
+ "filter_instance_group_anti_affinity": func() NovaStep { return &filters.FilterInstanceGroupAntiAffinityStep{} },
+ "filter_live_migratable": func() NovaStep { return &filters.FilterLiveMigratableStep{} },
+ "filter_requested_destination": func() NovaStep { return &filters.FilterRequestedDestinationStep{} },
+}
+
+// Configuration of weighers supported by the nova scheduler.
+var supportedWeighers = map[string]func() NovaStep{
"vmware_anti_affinity_noisy_projects": func() NovaStep { return &weighers.VMwareAntiAffinityNoisyProjectsStep{} },
"vmware_avoid_long_term_contended_hosts": func() NovaStep { return &weighers.VMwareAvoidLongTermContendedHostsStep{} },
"vmware_avoid_short_term_contended_hosts": func() NovaStep { return &weighers.VMwareAvoidShortTermContendedHostsStep{} },
"vmware_hana_binpacking": func() NovaStep { return &weighers.VMwareHanaBinpackingStep{} },
"vmware_general_purpose_balancing": func() NovaStep { return &weighers.VMwareGeneralPurposeBalancingStep{} },
- "filter_has_accelerators": func() NovaStep { return &filters.FilterHasAcceleratorsStep{} },
- "filter_correct_az": func() NovaStep { return &filters.FilterCorrectAZStep{} },
- "filter_status_conditions": func() NovaStep { return &filters.FilterStatusConditionsStep{} },
- "filter_maintenance": func() NovaStep { return &filters.FilterMaintenanceStep{} },
- "filter_packed_virtqueue": func() NovaStep { return &filters.FilterPackedVirtqueueStep{} },
- "filter_external_customer": func() NovaStep { return &filters.FilterExternalCustomerStep{} },
- "filter_allowed_projects": func() NovaStep { return &filters.FilterAllowedProjectsStep{} },
- "filter_capabilities": func() NovaStep { return &filters.FilterCapabilitiesStep{} },
- "filter_has_requested_traits": func() NovaStep { return &filters.FilterHasRequestedTraits{} },
- "filter_has_enough_capacity": func() NovaStep { return &filters.FilterHasEnoughCapacity{} },
- "filter_host_instructions": func() NovaStep { return &filters.FilterHostInstructionsStep{} },
- "filter_instance_group_affinity": func() NovaStep { return &filters.FilterInstanceGroupAffinityStep{} },
- "filter_instance_group_anti_affinity": func() NovaStep { return &filters.FilterInstanceGroupAntiAffinityStep{} },
- "filter_live_migratable": func() NovaStep { return &filters.FilterLiveMigratableStep{} },
- "filter_requested_destination": func() NovaStep { return &filters.FilterRequestedDestinationStep{} },
}
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index 63a143d2c..fbb8f84d0 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -199,7 +199,12 @@ func (c *DecisionPipelineController) InitPipeline(
p v1alpha1.Pipeline,
) (lib.Pipeline[pods.PodPipelineRequest], error) {
- return lib.NewPipeline(ctx, c.Client, p.Name, supportedSteps, p.Spec.Steps, c.Monitor)
+ return lib.NewFilterWeigherPipeline(
+ ctx, c.Client, p.Name,
+ supportedFilters, p.Spec.Filters,
+ supportedWeighers, p.Spec.Weighers,
+ c.Monitor,
+ )
}
func (c *DecisionPipelineController) handlePod() handler.EventHandler {
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 4a46e3a80..fbe493f4a 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -186,30 +186,30 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "noop step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Name: "noop",
- Type: v1alpha1.StepTypeFilter,
+ StepSpec: v1alpha1.StepSpec{Name: "noop"},
},
},
expectError: false,
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
- Name: "unsupported",
- Type: v1alpha1.StepTypeFilter,
+ StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
},
},
expectError: true,
@@ -223,7 +223,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "test-pipeline",
},
Spec: v1alpha1.PipelineSpec{
- Steps: tt.steps,
+ Filters: tt.filters,
+ Weighers: tt.weighers,
},
})
@@ -291,7 +292,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -324,7 +326,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: false,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -370,7 +373,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/pods/supported_steps.go b/internal/scheduling/decisions/pods/supported_steps.go
index ea0017c53..cbb27c4f4 100644
--- a/internal/scheduling/decisions/pods/supported_steps.go
+++ b/internal/scheduling/decisions/pods/supported_steps.go
@@ -12,12 +12,15 @@ import (
type PodStep = lib.Step[pods.PodPipelineRequest]
-// Configuration of steps supported by the scheduling.
-// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]func() PodStep{
+// Configuration of filters supported by the pods scheduler.
+var supportedFilters = map[string]func() PodStep{
"noop": func() PodStep { return &filters.NoopFilter{} },
"taint": func() PodStep { return &filters.TaintFilter{} },
"nodeaffinity": func() PodStep { return &filters.NodeAffinityFilter{} },
"nodecapacity": func() PodStep { return &filters.NodeCapacityFilter{} },
- "binpack": func() PodStep { return &weighers.BinpackingStep{} },
+}
+
+// Configuration of weighers supported by the pods scheduler.
+var supportedWeighers = map[string]func() PodStep{
+ "binpack": func() PodStep { return &weighers.BinpackingStep{} },
}
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index d1b9ac0da..6fd248321 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -82,8 +82,8 @@ type StepMonitor struct {
descheduledCounter prometheus.Counter
}
-// Monitor a step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
+// Monitor a descheduler step by wrapping it with a StepMonitor.
+func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -101,7 +101,7 @@ func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return m.step.Init(ctx, client, step)
}
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index 1f8e658de..5776acad0 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -80,7 +80,7 @@ type mockMonitorStep struct {
runCalled bool
}
-func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
m.initCalled = true
return m.initError
}
@@ -97,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index d1c3445cf..08c26213d 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -33,7 +33,7 @@ type Pipeline struct {
func (p *Pipeline) Init(
ctx context.Context,
- confedSteps []v1alpha1.StepSpec,
+ confedSteps []v1alpha1.DetectorSpec,
supportedSteps map[string]Step,
) error {
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index 4a357a5ed..0cae5eaff 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -52,7 +52,7 @@ func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- err := pipeline.Init(ctx, p.Spec.Steps, supportedSteps)
+ err := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
return pipeline, err
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 74d741647..e110e3914 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -33,34 +33,31 @@ type mockControllerStep struct{}
func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
return nil, nil
}
-func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return nil
}
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ steps []v1alpha1.DetectorSpec
expectError bool
expectedError string
}{
{
name: "successful pipeline initialization",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
- Type: v1alpha1.StepTypeDescheduler,
- Name: "mock-step",
+ StepSpec: v1alpha1.StepSpec{Name: "mock-step"},
},
},
expectError: false,
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
-
- Type: v1alpha1.StepTypeDescheduler,
- Name: "unsupported",
+ StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
},
},
expectError: true,
@@ -68,7 +65,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ steps: []v1alpha1.DetectorSpec{},
expectError: false,
},
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index b7a3472c3..476fc2de9 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -30,7 +30,7 @@ func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
return m.decisions, nil
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
if m.initError != nil {
return m.initError
}
@@ -42,7 +42,7 @@ func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
supportedSteps map[string]Step
- confedSteps []v1alpha1.StepSpec
+ confedSteps []v1alpha1.DetectorSpec
expectedSteps int
expectedError bool
}{
@@ -51,9 +51,8 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
- Name: "test-step",
- Type: v1alpha1.StepTypeDescheduler,
+ confedSteps: []v1alpha1.DetectorSpec{{
+ StepSpec: v1alpha1.StepSpec{Name: "test-step"},
}},
expectedSteps: 1,
},
@@ -62,9 +61,8 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
- Name: "unsupported-step",
- Type: v1alpha1.StepTypeDescheduler,
+ confedSteps: []v1alpha1.DetectorSpec{{
+ StepSpec: v1alpha1.StepSpec{Name: "unsupported-step"},
}},
expectedError: true,
},
@@ -73,9 +71,8 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
- confedSteps: []v1alpha1.StepSpec{{
- Name: "failing-step",
- Type: v1alpha1.StepTypeDescheduler,
+ confedSteps: []v1alpha1.DetectorSpec{{
+ StepSpec: v1alpha1.StepSpec{Name: "failing-step"},
}},
expectedError: true,
},
@@ -85,14 +82,12 @@ func TestPipeline_Init(t *testing.T) {
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{
+ confedSteps: []v1alpha1.DetectorSpec{
{
- Name: "step1",
- Type: v1alpha1.StepTypeDescheduler,
+ StepSpec: v1alpha1.StepSpec{Name: "step1"},
},
{
- Name: "step2",
- Type: v1alpha1.StepTypeDescheduler,
+ StepSpec: v1alpha1.StepSpec{Name: "step2"},
},
},
expectedSteps: 2,
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index f312f402b..c1b6ea902 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -11,9 +11,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-// Common base for all steps that provides some functionality
+// Common base for all descheduler steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[Opts any] struct {
+type Detector[Opts any] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The kubernetes client to use.
@@ -21,7 +21,7 @@ type BaseStep[Opts any] struct {
}
// Init the step with the database and options.
-func (s *BaseStep[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index f646523bb..4f01dfc32 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -20,14 +20,16 @@ func (o MockOptions) Validate() error {
return nil
}
-func TestBaseStep_Init(t *testing.T) {
- step := BaseStep[MockOptions]{}
+func TestDetector_Init(t *testing.T) {
+ step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
- err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
- Opts: runtime.RawExtension{Raw: []byte(`{
- "option1": "value1",
- "option2": 2
- }`)},
+ err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
+ StepSpec: v1alpha1.StepSpec{
+ Opts: runtime.RawExtension{Raw: []byte(`{
+ "option1": "value1",
+ "option2": 2
+ }`)},
+ },
})
if err != nil {
t.Fatalf("expected no error, got %v", err)
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
index bd8e51dae..348ee3249 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
@@ -20,8 +20,8 @@ type AvoidHighStealPctStepOpts struct {
}
type AvoidHighStealPctStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- plugins.BaseStep[AvoidHighStealPctStepOpts]
+ // Detector is a helper struct that provides common functionality for all descheduler steps.
+ plugins.Detector[AvoidHighStealPctStepOpts]
}
func (s *AvoidHighStealPctStep) Run() ([]plugins.Decision, error) {
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/step.go
index 7c53bc991..552edf87b 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/step.go
@@ -21,5 +21,5 @@ type Step interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 3fde3f226..1f10afa47 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -27,10 +27,14 @@ type pipeline[RequestType PipelineRequest] struct {
// The activation function to use when combining the
// results of the scheduler steps.
ActivationFunction
- // The order in which scheduler steps are applied, by their step name.
- order []string
- // The steps by their name.
- steps map[string]Step[RequestType]
+ // The order in which filters are applied, by their step name.
+ filtersOrder []string
+ // The filters by their name.
+ filters map[string]Step[RequestType]
+ // The order in which weighers are applied, by their step name.
+ weighersOrder []string
+ // The weighers by their name.
+ weighers map[string]Step[RequestType]
// Monitor to observe the pipeline.
monitor PipelineMonitor
}
@@ -42,76 +46,135 @@ type StepWrapper[RequestType PipelineRequest] func(
impl Step[RequestType],
) (Step[RequestType], error)
-// Create a new pipeline with steps contained in the configuration.
-func NewPipeline[RequestType PipelineRequest](
+// Create a new pipeline with filters and weighers contained in the configuration.
+func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
- supportedSteps map[string]func() Step[RequestType],
- confedSteps []v1alpha1.StepSpec,
+ supportedFilters map[string]func() Step[RequestType],
+ confedFilters []v1alpha1.FilterSpec,
+ supportedWeighers map[string]func() Step[RequestType],
+ confedWeighers []v1alpha1.WeigherSpec,
monitor PipelineMonitor,
) (Pipeline[RequestType], error) {
- // Load all steps from the configuration.
- stepsByName := make(map[string]Step[RequestType], len(confedSteps))
- order := []string{}
-
pipelineMonitor := monitor.SubPipeline(name)
- for _, stepConfig := range confedSteps {
- slog.Info("scheduler: configuring step", "name", stepConfig.Name)
- slog.Info("supported:", "steps", maps.Keys(supportedSteps))
- makeStep, ok := supportedSteps[stepConfig.Name]
+ // Ensure there are no overlaps between filter and weigher names.
+ for filterName := range supportedFilters {
+ if _, ok := supportedWeighers[filterName]; ok {
+ return nil, errors.New("step name overlap between filters and weighers: " + filterName)
+ }
+ }
+
+ // Load all filters from the configuration.
+ filtersByName := make(map[string]Step[RequestType], len(confedFilters))
+ filtersOrder := []string{}
+ for _, filterConfig := range confedFilters {
+ slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
+ slog.Info("supported:", "filters", maps.Keys(supportedFilters))
+ makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return nil, errors.New("unsupported scheduler step name: " + stepConfig.Name)
+ return nil, errors.New("unsupported filter name: " + filterConfig.Name)
}
- step := makeStep()
- if stepConfig.Type == v1alpha1.StepTypeWeigher && stepConfig.Weigher != nil {
- step = validateStep(step, stepConfig.Weigher.DisabledValidations)
+ filter := makeFilter()
+ filter = monitorStep(ctx, client, filterConfig.StepSpec, filter, pipelineMonitor)
+ if err := filter.Init(ctx, client, filterConfig.StepSpec); err != nil {
+ return nil, errors.New("failed to initialize filter: " + err.Error())
}
- step = monitorStep(ctx, client, stepConfig, step, pipelineMonitor)
- if err := step.Init(ctx, client, stepConfig); err != nil {
+ filtersByName[filterConfig.Name] = filter
+ filtersOrder = append(filtersOrder, filterConfig.Name)
+ slog.Info("scheduler: added filter", "name", filterConfig.Name)
+ }
+
+ // Load all weighers from the configuration.
+ weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
+ weighersOrder := []string{}
+ for _, weigherConfig := range confedWeighers {
+ slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
+ slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
+ makeWeigher, ok := supportedWeighers[weigherConfig.Name]
+ if !ok {
+ return nil, errors.New("unsupported weigher name: " + weigherConfig.Name)
+ }
+ weigher := makeWeigher()
+ weigher = validateWeigher(weigher)
+ weigher = monitorStep(ctx, client, weigherConfig.StepSpec, weigher, pipelineMonitor)
+ if err := weigher.Init(ctx, client, weigherConfig.StepSpec); err != nil {
return nil, errors.New("failed to initialize pipeline step: " + err.Error())
}
- stepsByName[stepConfig.Name] = step
- order = append(order, stepConfig.Name)
- slog.Info(
- "scheduler: added step",
- "name", stepConfig.Name,
- )
+ weighersByName[weigherConfig.Name] = weigher
+ weighersOrder = append(weighersOrder, weigherConfig.Name)
+ slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
}
+
return &pipeline[RequestType]{
- // All steps can be run in parallel.
- order: order,
- steps: stepsByName,
- monitor: pipelineMonitor,
+ filtersOrder: filtersOrder,
+ filters: filtersByName,
+ weighersOrder: weighersOrder,
+ weighers: weighersByName,
+ monitor: pipelineMonitor,
}, nil
}
-// Execute the scheduler steps in groups of the execution order.
-// The steps are run in parallel.
-func (p *pipeline[RequestType]) runSteps(log *slog.Logger, request RequestType) map[string]map[string]float64 {
- var lock sync.Mutex
+// Run filters sequentially, narrowing the request to the subjects
+// that pass each filter. No per-step activations are collected;
+// only the reduced request is returned.
+func (p *pipeline[RequestType]) runFilters(
+ log *slog.Logger,
+ request RequestType,
+) (filteredRequest RequestType) {
+
+ filteredRequest = request
+ for _, filterName := range p.filtersOrder {
+ filter := p.filters[filterName]
+ stepLog := log.With("filter", filterName)
+ stepLog.Info("scheduler: running filter")
+ result, err := filter.Run(stepLog, filteredRequest)
+ if errors.Is(err, ErrStepSkipped) {
+ stepLog.Info("scheduler: filter skipped")
+ continue
+ }
+ if err != nil {
+ stepLog.Error("scheduler: failed to run filter", "error", err)
+ continue
+ }
+ stepLog.Info("scheduler: finished filter")
+ // Mutate the request to only include the remaining subjects.
+ // Assume the resulting request type is the same as the input type.
+ filteredRequest = filteredRequest.FilterSubjects(result.Activations).(RequestType)
+ }
+ return filteredRequest
+}
+
+// Execute weighers and collect their activations by step name.
+func (p *pipeline[RequestType]) runWeighers(
+ log *slog.Logger,
+ filteredRequest RequestType,
+) map[string]map[string]float64 {
+
activationsByStep := map[string]map[string]float64{}
+ // Weighers can be run in parallel as they do not modify the request.
+ var lock sync.Mutex
var wg sync.WaitGroup
- for _, stepName := range p.order {
- step := p.steps[stepName]
+ for _, weigherName := range p.weighersOrder {
+ weigher := p.weighers[weigherName]
wg.Go(func() {
- stepLog := log.With("stepName", stepName)
- stepLog.Info("scheduler: running step")
- result, err := step.Run(stepLog, request)
+ stepLog := log.With("weigher", weigherName)
+ stepLog.Info("scheduler: running weigher")
+ result, err := weigher.Run(stepLog, filteredRequest)
if errors.Is(err, ErrStepSkipped) {
- stepLog.Info("scheduler: step skipped")
+ stepLog.Info("scheduler: weigher skipped")
return
}
if err != nil {
- stepLog.Error("scheduler: failed to run step", "error", err)
+ stepLog.Error("scheduler: failed to run weigher", "error", err)
return
}
- stepLog.Info("scheduler: finished step")
+ stepLog.Info("scheduler: finished weigher")
lock.Lock()
defer lock.Unlock()
- activationsByStep[stepName] = result.Activations
+ activationsByStep[weigherName] = result.Activations
})
}
wg.Wait()
@@ -134,7 +197,7 @@ func (p *pipeline[RequestType]) normalizeInputWeights(weights map[string]float64
}
// Apply the step weights to the input weights.
-func (p *pipeline[RequestType]) applyStepWeights(
+func (p *pipeline[RequestType]) applyWeights(
stepWeights map[string]map[string]float64,
inWeights map[string]float64,
) map[string]float64 {
@@ -143,13 +206,13 @@ func (p *pipeline[RequestType]) applyStepWeights(
maps.Copy(outWeights, inWeights)
// Apply all activations in the strict order defined by the configuration.
- for _, stepName := range p.order {
- stepActivations, ok := stepWeights[stepName]
+ for _, weigherName := range p.weighersOrder {
+ weigherActivations, ok := stepWeights[weigherName]
if !ok {
// This is ok, since steps can be skipped.
continue
}
- outWeights = p.Apply(outWeights, stepActivations)
+ outWeights = p.Apply(outWeights, weigherActivations)
}
return outWeights
}
@@ -176,15 +239,27 @@ func (p *pipeline[RequestType]) Run(request RequestType) (v1alpha1.DecisionResul
subjectsIn := request.GetSubjects()
traceLog.Info("scheduler: starting pipeline", "subjects", subjectsIn)
- // Get weights from the scheduler steps, apply them to the input weights, and
- // sort the subjects by their weights. The input weights are normalized before
- // applying the step weights.
- stepWeights := p.runSteps(traceLog, request)
- traceLog.Info("scheduler: finished pipeline")
+ // Normalize the input weights so we can apply step weights meaningfully.
inWeights := p.normalizeInputWeights(request.GetWeights())
traceLog.Info("scheduler: input weights", "weights", inWeights)
- outWeights := p.applyStepWeights(stepWeights, inWeights)
+
+	// Run filters first to reduce the number of subjects.
+	// Weights assigned to filtered-out subjects are discarded.
+ filteredRequest := p.runFilters(traceLog, request)
+ traceLog.Info(
+ "scheduler: finished filters",
+ "remainingSubjects", filteredRequest.GetSubjects(),
+ )
+
+ // Run weighers on the filtered subjects.
+ remainingWeights := make(map[string]float64, len(filteredRequest.GetSubjects()))
+ for _, subject := range filteredRequest.GetSubjects() {
+ remainingWeights[subject] = inWeights[subject]
+ }
+ stepWeights := p.runWeighers(traceLog, filteredRequest)
+ outWeights := p.applyWeights(stepWeights, remainingWeights)
traceLog.Info("scheduler: output weights", "weights", outWeights)
+
subjects := p.sortSubjectsByWeights(outWeights)
traceLog.Info("scheduler: sorted subjects", "subjects", subjects)
@@ -200,13 +275,5 @@ func (p *pipeline[RequestType]) Run(request RequestType) (v1alpha1.DecisionResul
if len(subjects) > 0 {
result.TargetHost = &subjects[0]
}
- for _, stepName := range p.order {
- if activations, ok := stepWeights[stepName]; ok {
- result.StepResults = append(result.StepResults, v1alpha1.StepResult{
- StepName: stepName,
- Activations: activations,
- })
- }
- }
return result, nil
}
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 36a46f78e..50e3af497 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
@@ -89,28 +90,21 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
old := obj.DeepCopy()
// Check if all steps are ready. If not, check if the step is mandatory.
- obj.Status.TotalSteps = len(obj.Spec.Steps)
+ obj.Status.TotalSteps = len(obj.Spec.Filters) + len(obj.Spec.Weighers) + len(obj.Spec.Detectors)
obj.Status.ReadySteps = 0
- for _, step := range obj.Spec.Steps {
- err := c.checkStepReady(ctx, &step)
- if err == nil {
+ for range obj.Spec.Filters { // Could use len() directly but want to keep the pattern.
+ // If needed, check if this filter needs any dependencies. For now,
+ // as filters do not depend on knowledges, we skip this.
+ obj.Status.ReadySteps++
+ }
+ for _, detector := range obj.Spec.Detectors {
+ if err := c.checkAllKnowledgesReady(ctx, detector.Knowledges); err == nil {
obj.Status.ReadySteps++
- continue
}
- if step.Mandatory {
- meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
- Type: v1alpha1.PipelineConditionReady,
- Status: metav1.ConditionFalse,
- Reason: "MandatoryStepNotReady",
- Message: fmt.Sprintf("mandatory step %s not ready: %s", step.Name, err.Error()),
- })
- patch := client.MergeFrom(old)
- if err := c.Status().Patch(ctx, obj, patch); err != nil {
- log.Error(err, "failed to patch pipeline status", "pipelineName", obj.Name)
- }
- delete(c.Pipelines, obj.Name)
- delete(c.PipelineConfigs, obj.Name)
- return
+ }
+ for _, weigher := range obj.Spec.Weighers {
+ if err := c.checkAllKnowledgesReady(ctx, weigher.Knowledges); err == nil {
+ obj.Status.ReadySteps++
}
}
obj.Status.StepsReadyFrac = fmt.Sprintf("%d/%d", obj.Status.ReadySteps, obj.Status.TotalSteps)
@@ -190,32 +184,32 @@ func (c *BasePipelineController[PipelineType]) HandlePipelineDeleted(
delete(c.PipelineConfigs, pipelineConf.Name)
}
-// Check if a step is ready, and if not, return an error indicating why not.
-func (c *BasePipelineController[PipelineType]) checkStepReady(
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (c *BasePipelineController[PipelineType]) checkAllKnowledgesReady(
ctx context.Context,
- obj *v1alpha1.StepSpec,
+ objects []corev1.ObjectReference,
) error {
log := ctrl.LoggerFrom(ctx)
// Check the status of all knowledges depending on this step.
readyKnowledges := 0
- totalKnowledges := len(obj.Knowledges)
- for _, knowledgeRef := range obj.Knowledges {
+ totalKnowledges := len(objects)
+ for _, objRef := range objects {
knowledge := &v1alpha1.Knowledge{}
if err := c.Get(ctx, client.ObjectKey{
- Name: knowledgeRef.Name,
- Namespace: knowledgeRef.Namespace,
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
}, knowledge); err != nil {
- log.Error(err, "failed to get knowledge depending on step", "knowledgeName", knowledgeRef.Name)
+ log.Error(err, "failed to get knowledge depending on step", "knowledgeName", objRef.Name)
continue
}
// Check if the knowledge status conditions indicate an error.
if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- log.Info("knowledge not ready due to error condition", "knowledgeName", knowledgeRef.Name)
+ log.Info("knowledge not ready due to error condition", "knowledgeName", objRef.Name)
continue
}
if knowledge.Status.RawLength == 0 {
- log.Info("knowledge not ready, no data available", "knowledgeName", knowledgeRef.Name)
+ log.Info("knowledge not ready, no data available", "knowledgeName", objRef.Name)
continue
}
readyKnowledges++
@@ -249,7 +243,17 @@ func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
}
for _, pipeline := range pipelines.Items {
needsUpdate := false
- for _, step := range pipeline.Spec.Steps {
+ // For filter-weigher pipelines, only weighers may depend on knowledges.
+ for _, step := range pipeline.Spec.Weighers {
+ for _, knowledgeRef := range step.Knowledges {
+ if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
+ needsUpdate = true
+ break
+ }
+ }
+ }
+ // Check descheduler pipelines where detectors may depend on knowledges.
+ for _, step := range pipeline.Spec.Detectors {
for _, knowledgeRef := range step.Knowledges {
if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
needsUpdate = true
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 034186cbc..e2706b8de 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -72,7 +72,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -91,7 +92,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -101,7 +103,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -111,7 +114,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeDescheduler,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -121,7 +125,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -197,14 +202,14 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Filters: []v1alpha1.FilterSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Mandatory: true,
- Knowledges: []corev1.ObjectReference{
- {Name: "knowledge-1", Namespace: "default"},
- },
+ StepSpec: v1alpha1.StepSpec{Name: "test-filter"},
+ },
+ },
+ Weighers: []v1alpha1.WeigherSpec{
+ {
+ StepSpec: v1alpha1.StepSpec{Name: "test-weigher"},
},
},
},
@@ -227,32 +232,6 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
expectReady: true,
expectInMap: true,
},
- {
- name: "pipeline with mandatory step not ready",
- pipeline: &v1alpha1.Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-pipeline-not-ready",
- },
- Spec: v1alpha1.PipelineSpec{
- SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
- {
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Mandatory: true,
- Knowledges: []corev1.ObjectReference{
- {Name: "missing-knowledge", Namespace: "default"},
- },
- },
- },
- },
- },
- knowledges: []v1alpha1.Knowledge{},
- schedulingDomain: v1alpha1.SchedulingDomainNova,
- expectReady: false,
- expectInMap: false,
- },
{
name: "pipeline with optional step not ready",
pipeline: &v1alpha1.Pipeline{
@@ -262,11 +241,9 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Mandatory: false,
+ StepSpec: v1alpha1.StepSpec{Name: "test-weigher"},
Knowledges: []corev1.ObjectReference{
{Name: "missing-knowledge", Namespace: "default"},
},
@@ -288,7 +265,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -306,7 +283,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -314,51 +291,6 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
expectReady: false,
expectInMap: false,
},
- {
- name: "pipeline with knowledge in error state",
- pipeline: &v1alpha1.Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-pipeline-knowledge-error",
- },
- Spec: v1alpha1.PipelineSpec{
- SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
- {
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Mandatory: true,
- Knowledges: []corev1.ObjectReference{
- {Name: "error-knowledge", Namespace: "default"},
- },
- },
- },
- },
- },
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "error-knowledge",
- Namespace: "default",
- },
- Spec: v1alpha1.KnowledgeSpec{
- SchedulingDomain: v1alpha1.SchedulingDomainNova,
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- Conditions: []metav1.Condition{
- {
- Type: v1alpha1.KnowledgeConditionReady,
- Status: metav1.ConditionFalse,
- },
- },
- },
- },
- },
- schedulingDomain: v1alpha1.SchedulingDomainNova,
- expectReady: false,
- expectInMap: false,
- },
}
for _, tt := range tests {
@@ -429,7 +361,8 @@ func TestBasePipelineController_HandlePipelineCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -473,7 +406,8 @@ func TestBasePipelineController_HandlePipelineUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -547,7 +481,7 @@ func TestBasePipelineController_HandlePipelineDeleted(t *testing.T) {
}
}
-func TestBasePipelineController_checkStepReady(t *testing.T) {
+func TestBasePipelineController_checkAllKnowledgesReady(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -555,29 +489,16 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
tests := []struct {
name string
- step v1alpha1.StepSpec
knowledges []v1alpha1.Knowledge
expectError bool
}{
{
- name: "step with no knowledge dependencies",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{},
- },
+ name: "no knowledges",
knowledges: []v1alpha1.Knowledge{},
expectError: false,
},
{
- name: "step with ready knowledge",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "ready-knowledge", Namespace: "default"},
- },
- },
+ name: "ready knowledge",
knowledges: []v1alpha1.Knowledge{
{
ObjectMeta: metav1.ObjectMeta{
@@ -592,14 +513,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
expectError: false,
},
{
- name: "step with knowledge in error state",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "error-knowledge", Namespace: "default"},
- },
- },
+ name: "knowledge in error state",
knowledges: []v1alpha1.Knowledge{
{
ObjectMeta: metav1.ObjectMeta{
@@ -619,14 +533,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
expectError: true,
},
{
- name: "step with knowledge with no data",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "no-data-knowledge", Namespace: "default"},
- },
- },
+ name: "knowledge with no data",
knowledges: []v1alpha1.Knowledge{
{
ObjectMeta: metav1.ObjectMeta{
@@ -641,27 +548,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
expectError: true,
},
{
- name: "step with missing knowledge",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "missing-knowledge", Namespace: "default"},
- },
- },
- knowledges: []v1alpha1.Knowledge{},
- expectError: true,
- },
- {
- name: "step with multiple knowledges, all ready",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "knowledge-1", Namespace: "default"},
- {Name: "knowledge-2", Namespace: "default"},
- },
- },
+ name: "multiple knowledges, all ready",
knowledges: []v1alpha1.Knowledge{
{
ObjectMeta: metav1.ObjectMeta{
@@ -685,15 +572,7 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
expectError: false,
},
{
- name: "step with multiple knowledges, some not ready",
- step: v1alpha1.StepSpec{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Knowledges: []corev1.ObjectReference{
- {Name: "ready-knowledge", Namespace: "default"},
- {Name: "not-ready-knowledge", Namespace: "default"},
- },
- },
+ name: "multiple knowledges, some not ready",
knowledges: []v1alpha1.Knowledge{
{
ObjectMeta: metav1.ObjectMeta{
@@ -734,7 +613,14 @@ func TestBasePipelineController_checkStepReady(t *testing.T) {
Client: fakeClient,
}
- err := controller.checkStepReady(context.Background(), &tt.step)
+ objectReferences := make([]corev1.ObjectReference, len(tt.knowledges))
+ for i, k := range tt.knowledges {
+ objectReferences[i] = corev1.ObjectReference{
+ Name: k.Name,
+ Namespace: k.Namespace,
+ }
+ }
+ err := controller.checkAllKnowledgesReady(context.Background(), objectReferences)
if tt.expectError && err == nil {
t.Error("Expected error but got none")
@@ -781,10 +667,11 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -799,10 +686,11 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "other-knowledge", Namespace: "default"},
},
@@ -833,10 +721,11 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -911,10 +800,11 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1063,10 +953,11 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1132,11 +1023,11 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Steps: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
- Type: v1alpha1.StepTypeFilter,
- Name: "test-filter",
- Mandatory: true,
+ StepSpec: v1alpha1.StepSpec{
+ Name: "test-weigher",
+ },
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1169,10 +1060,8 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
controller.HandleKnowledgeDeleted(context.Background(), evt, nil)
- // When knowledge is deleted, the pipeline is re-evaluated.
- // Since the knowledge is now missing and the step is mandatory,
- // the pipeline should be removed from the map.
- if _, exists := controller.Pipelines[pipeline.Name]; exists {
- t.Error("Expected pipeline to be removed after knowledge deletion due to mandatory step")
+ // Check that the pipeline was re-evaluated and is still in the map
+ if _, exists := controller.Pipelines[pipeline.Name]; !exists {
+ t.Error("Expected pipeline to be re-evaluated after knowledge deletion")
}
}
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index 9a8651384..f32c5377b 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -13,16 +13,34 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockPipelineStep struct {
+type mockFilter struct {
err error
name string
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
-func (m *mockPipelineStep) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+func (m *mockFilter) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ if m.err != nil {
+ return nil, m.err
+ }
+ return &StepResult{
+ Activations: map[string]float64{"host1": 0.0, "host2": 0.0},
+ }, nil
+}
+
+type mockWeigher struct {
+ err error
+ name string
+}
+
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ return nil
+}
+
+func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
if m.err != nil {
return nil, m.err
}
@@ -34,14 +52,18 @@ func (m *mockPipelineStep) Run(traceLog *slog.Logger, request mockPipelineReques
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &pipeline[mockPipelineRequest]{
- steps: map[string]Step[mockPipelineRequest]{
- "mock_pipeline_step": &mockPipelineStep{
- name: "mock_pipeline_step",
+ filters: map[string]Step[mockPipelineRequest]{
+ "mock_filter": &mockFilter{
+ name: "mock_filter",
},
},
- order: []string{
- "mock_pipeline_step",
+ filtersOrder: []string{"mock_filter"},
+ weighers: map[string]Step[mockPipelineRequest]{
+ "mock_weigher": &mockWeigher{
+ name: "mock_weigher",
+ },
},
+ weighersOrder: []string{"mock_weigher"},
}
tests := []struct {
@@ -114,8 +136,8 @@ func TestPipeline_NormalizeNovaWeights(t *testing.T) {
func TestPipeline_ApplyStepWeights(t *testing.T) {
p := &pipeline[mockPipelineRequest]{
- steps: map[string]Step[mockPipelineRequest]{},
- order: []string{"step1", "step2"},
+ weighers: map[string]Step[mockPipelineRequest]{},
+ weighersOrder: []string{"step1", "step2"},
}
tests := []struct {
@@ -143,7 +165,7 @@ func TestPipeline_ApplyStepWeights(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- result := p.applyStepWeights(tt.stepWeights, tt.inWeights)
+ result := p.applyWeights(tt.stepWeights, tt.inWeights)
for host, weight := range tt.expectedResult {
if result[host] != weight {
t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host])
@@ -184,32 +206,26 @@ func TestPipeline_SortHostsByWeights(t *testing.T) {
}
}
-func TestPipeline_RunSteps(t *testing.T) {
- mockStep := &mockPipelineStep{
- name: "mock_pipeline_step",
+func TestPipeline_RunFilters(t *testing.T) {
+ mockStep := &mockFilter{
+ name: "mock_filter",
}
p := &pipeline[mockPipelineRequest]{
- order: []string{
- "mock_pipeline_step",
+ filtersOrder: []string{
+ "mock_filter",
},
- steps: map[string]Step[mockPipelineRequest]{
- "mock_pipeline_step": mockStep,
+ filters: map[string]Step[mockPipelineRequest]{
+ "mock_filter": mockStep,
},
}
request := mockPipelineRequest{
Subjects: []string{"host1", "host2"},
- Weights: map[string]float64{"host1": 0.0, "host2": 0.0},
+ Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
}
- result := p.runSteps(slog.Default(), request)
- if len(result) != 1 {
- t.Fatalf("expected 1 step result, got %d", len(result))
- }
- if _, ok := result["mock_pipeline_step"]; !ok {
- t.Fatalf("expected result for step 'mock_pipeline_step'")
- }
- if result["mock_pipeline_step"]["host2"] != 1.0 {
- t.Errorf("expected weight 1.0 for host2, got %f", result["mock_pipeline_step"]["host2"])
+ req := p.runFilters(slog.Default(), request)
+ if len(req.Subjects) != 2 {
+ t.Fatalf("expected 2 subjects to remain after filtering, got %d", len(req.Subjects))
}
}
diff --git a/internal/scheduling/lib/request.go b/internal/scheduling/lib/request.go
index 6ad7ef1c6..4c49299bd 100644
--- a/internal/scheduling/lib/request.go
+++ b/internal/scheduling/lib/request.go
@@ -8,6 +8,11 @@ import "log/slog"
type PipelineRequest interface {
// Get the subjects that went in the pipeline.
GetSubjects() []string
+ // This function can be used by the pipeline to obtain a mutated version
+ // of the request with only the given subjects remaining. This is helpful
+ // for steps that filter out subjects. Subjects not included in the map
+ // are considered as filtered out, and won't be reconsidered in later steps.
+ FilterSubjects(includedSubjects map[string]float64) PipelineRequest
// Get the weights for the subjects.
GetWeights() map[string]float64
// Get logging args to be used in the step's trace log.
diff --git a/internal/scheduling/lib/request_test.go b/internal/scheduling/lib/request_test.go
index 8706d81f8..455dcc8ac 100644
--- a/internal/scheduling/lib/request_test.go
+++ b/internal/scheduling/lib/request_test.go
@@ -18,7 +18,12 @@ func (m mockPipelineRequest) GetTraceLogArgs() []slog.Attr { return m.TraceLog
func (m mockPipelineRequest) GetSubjects() []string { return m.Subjects }
func (m mockPipelineRequest) GetWeights() map[string]float64 { return m.Weights }
func (m mockPipelineRequest) GetPipeline() string { return m.Pipeline }
-func (m mockPipelineRequest) WithPipeline(pipeline string) PipelineRequest {
- m.Pipeline = pipeline
+
+func (m mockPipelineRequest) FilterSubjects(subjects map[string]float64) PipelineRequest {
+ filteredSubjects := make([]string, 0, len(subjects))
+ for subject := range subjects {
+ filteredSubjects = append(filteredSubjects, subject)
+ }
+ m.Subjects = filteredSubjects
return m
}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index a25c55a2a..f68fb51d5 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -52,6 +52,14 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
Client client.Client
}
+// Common base implementation of a weigher step.
+// Functionally identical to BaseStep, but used for clarity.
+type Weigher[RequestType PipelineRequest, Opts StepOpts] struct{ BaseStep[RequestType, Opts] }
+
+// Common base implementation of a filter step.
+// Functionally identical to BaseStep, but used for clarity.
+type Filter[RequestType PipelineRequest, Opts StepOpts] struct{ BaseStep[RequestType, Opts] }
+
// Init the step with the database and options.
func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
opts := conf.NewRawOptsBytes(step.Opts.Raw)
diff --git a/internal/scheduling/lib/step_validation.go b/internal/scheduling/lib/step_validation.go
deleted file mode 100644
index 87cfa759a..000000000
--- a/internal/scheduling/lib/step_validation.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
- "errors"
- "log/slog"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Wrapper for scheduler steps that validates them before/after execution.
-type StepValidator[RequestType PipelineRequest] struct {
- // The wrapped step to validate.
- Step Step[RequestType]
- // By default, we execute all validations. However, through the config,
- // we can also disable some validations if necessary.
- DisabledValidations v1alpha1.DisabledValidationsSpec
-}
-
-// Initialize the wrapped step with the database and options.
-func (s *StepValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- slog.Info(
- "scheduler: init validation for step", "name", step.Name,
- "disabled", s.DisabledValidations,
- )
- return s.Step.Init(ctx, client, step)
-}
-
-// Validate the wrapped step with the database and options.
-func validateStep[RequestType PipelineRequest](step Step[RequestType], disabledValidations v1alpha1.DisabledValidationsSpec) *StepValidator[RequestType] {
- return &StepValidator[RequestType]{
- Step: step,
- DisabledValidations: disabledValidations,
- }
-}
-
-// Run the step and validate what happens.
-func (s *StepValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
- result, err := s.Step.Run(traceLog, request)
- if err != nil {
- return nil, err
- }
- // If not disabled, validate that the number of subjects stayed the same.
- // Note that for some schedulers the same subject (e.g. compute host) may
- // appear multiple times if there is a substruct (e.g. hypervisor hostname).
- // Since cortex will only schedule on the subject level and not below,
- // we need to deduplicate the subjects first before the validation.
- if !s.DisabledValidations.SameSubjectNumberInOut {
- deduplicated := map[string]struct{}{}
- for _, subject := range request.GetSubjects() {
- deduplicated[subject] = struct{}{}
- }
- if len(result.Activations) != len(deduplicated) {
- return nil, errors.New("safety: number of (deduplicated) subjects changed during step execution")
- }
- }
- // If not disabled, validate that some subjects remain.
- if !s.DisabledValidations.SomeSubjectsRemain {
- if len(result.Activations) == 0 {
- return nil, errors.New("safety: no subjects remain after step execution")
- }
- }
- return result, nil
-}
diff --git a/internal/scheduling/lib/step_validation_test.go b/internal/scheduling/lib/step_validation_test.go
deleted file mode 100644
index 2cdd5a52f..000000000
--- a/internal/scheduling/lib/step_validation_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "log/slog"
- "reflect"
- "testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
-)
-
-func TestStepValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
- Activations: map[string]float64{
- "host1": 1.0,
- "host2": 1.0,
- },
- }, nil
- },
- }
-
- request := mockPipelineRequest{
- Subjects: []string{"subject1", "subject2"},
- }
-
- validator := StepValidator[mockPipelineRequest]{
- Step: mockStep,
- DisabledValidations: v1alpha1.DisabledValidationsSpec{
- SameSubjectNumberInOut: false,
- },
- }
-
- result, err := validator.Run(slog.Default(), request)
- if err != nil {
- t.Errorf("Run() error = %v, want nil", err)
- }
-
- expectedWeights := map[string]float64{
- "host1": 1.0,
- "host2": 1.0,
- }
-
- if !reflect.DeepEqual(result.Activations, expectedWeights) {
- t.Errorf("Run() weights = %v, want %v", result.Activations, expectedWeights)
- }
-}
-
-func TestStepValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
- Activations: map[string]float64{
- "host1": 1.0,
- },
- }, nil
- },
- }
-
- request := mockPipelineRequest{
- Subjects: []string{"subject1", "subject2"},
- }
-
- validator := StepValidator[mockPipelineRequest]{
- Step: mockStep,
- DisabledValidations: v1alpha1.DisabledValidationsSpec{
- SameSubjectNumberInOut: false,
- },
- }
-
- result, err := validator.Run(slog.Default(), request)
- if err == nil {
- t.Errorf("Run() error = nil, want error")
- }
-
- if result != nil {
- t.Errorf("Run() weights = %v, want nil", result.Activations)
- }
-
- expectedError := "safety: number of (deduplicated) subjects changed during step execution"
- if err.Error() != expectedError {
- t.Errorf("Run() error = %v, want %v", err.Error(), expectedError)
- }
-}
-
-func TestStepValidator_Run_DisabledValidation(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
- Activations: map[string]float64{
- "host1": 1.0,
- },
- }, nil
- },
- }
-
- request := mockPipelineRequest{
- Subjects: []string{"subject1"},
- }
-
- validator := StepValidator[mockPipelineRequest]{
- Step: mockStep,
- DisabledValidations: v1alpha1.DisabledValidationsSpec{
- SameSubjectNumberInOut: true, // Validation is disabled
- },
- }
-
- result, err := validator.Run(slog.Default(), request)
- if err != nil {
- t.Errorf("Run() error = %v, want nil", err)
- }
-
- expectedWeights := map[string]float64{
- "host1": 1.0,
- }
-
- if !reflect.DeepEqual(result.Activations, expectedWeights) {
- t.Errorf("Run() weights = %v, want %v", result.Activations, expectedWeights)
- }
-}
-
-func TestValidateStep(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{}
- disabledValidations := v1alpha1.DisabledValidationsSpec{
- SameSubjectNumberInOut: true,
- }
-
- validator := validateStep(mockStep, disabledValidations)
- if !reflect.DeepEqual(validator.DisabledValidations, disabledValidations) {
- t.Errorf("validateStep() DisabledValidations = %v, want %v", validator.DisabledValidations, disabledValidations)
- }
-}
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
new file mode 100644
index 000000000..629ba7b6b
--- /dev/null
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -0,0 +1,54 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Wrapper for weigher steps that validates them before/after execution.
+type WeigherValidator[RequestType PipelineRequest] struct {
+ // The wrapped weigher to validate.
+ Weigher Step[RequestType]
+}
+
+// Initialize the wrapped weigher with the client and the step spec options.
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ slog.Info("scheduler: init validation for step", "name", step.Name)
+ return s.Weigher.Init(ctx, client, step)
+}
+
+// Wrap the given weigher in a WeigherValidator so its results are checked on every run.
+func validateWeigher[RequestType PipelineRequest](weigher Step[RequestType]) *WeigherValidator[RequestType] {
+ return &WeigherValidator[RequestType]{Weigher: weigher}
+}
+
+// Run the weigher and validate what happens.
+func (s *WeigherValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ result, err := s.Weigher.Run(traceLog, request)
+ if err != nil {
+ return nil, err
+ }
+ // Note that for some schedulers the same subject (e.g. compute host) may
+ // appear multiple times if there is a substruct (e.g. hypervisor hostname).
+ // Since cortex will only schedule on the subject level and not below,
+ // we need to deduplicate the subjects first before the validation.
+ deduplicated := map[string]struct{}{}
+ for _, subject := range request.GetSubjects() {
+ deduplicated[subject] = struct{}{}
+ }
+ if len(result.Activations) != len(deduplicated) {
+ return nil, errors.New("safety: number of (deduplicated) subjects changed during step execution")
+ }
+ // Validate that some subjects remain.
+ if len(result.Activations) == 0 {
+ return nil, errors.New("safety: no subjects remain after step execution")
+ }
+ return result, nil
+}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
new file mode 100644
index 000000000..aa6cba851
--- /dev/null
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -0,0 +1,79 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "log/slog"
+ "reflect"
+ "testing"
+)
+
+func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
+ mockStep := &mockStep[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ return &StepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ },
+ }, nil
+ },
+ }
+
+ request := mockPipelineRequest{
+ Subjects: []string{"subject1", "subject2"},
+ }
+
+ validator := WeigherValidator[mockPipelineRequest]{
+ Weigher: mockStep,
+ }
+
+ result, err := validator.Run(slog.Default(), request)
+ if err != nil {
+ t.Errorf("Run() error = %v, want nil", err)
+ }
+
+ expectedWeights := map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ }
+
+ if !reflect.DeepEqual(result.Activations, expectedWeights) {
+ t.Errorf("Run() weights = %v, want %v", result.Activations, expectedWeights)
+ }
+}
+
+func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
+ mockStep := &mockStep[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ return &StepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ },
+ }, nil
+ },
+ }
+
+ request := mockPipelineRequest{
+ Subjects: []string{"subject1", "subject2"},
+ }
+
+ validator := WeigherValidator[mockPipelineRequest]{
+ Weigher: mockStep,
+ }
+
+ result, err := validator.Run(slog.Default(), request)
+ if err == nil {
+ t.Errorf("Run() error = nil, want error")
+ }
+
+ if result != nil {
+ t.Errorf("Run() weights = %v, want nil", result.Activations)
+ }
+
+ expectedError := "safety: number of (deduplicated) subjects changed during step execution"
+ if err.Error() != expectedError {
+ t.Errorf("Run() error = %v, want %v", err.Error(), expectedError)
+ }
+}
From a307312da45c69c2c1976d32a5142c93d139e806 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Wed, 21 Jan 2026 16:37:50 +0100
Subject: [PATCH 03/41] Use Filter and Weigher instead of BaseStep
---
.../manila/plugins/weighers/netapp_cpu_usage_balancing.go | 4 ++--
.../nova/plugins/filters/filter_allowed_projects.go | 2 +-
.../decisions/nova/plugins/filters/filter_capabilities.go | 2 +-
.../decisions/nova/plugins/filters/filter_correct_az.go | 2 +-
.../nova/plugins/filters/filter_external_customer.go | 2 +-
.../nova/plugins/filters/filter_has_accelerators.go | 2 +-
.../nova/plugins/filters/filter_has_enough_capacity.go | 2 +-
.../nova/plugins/filters/filter_has_requested_traits.go | 2 +-
.../nova/plugins/filters/filter_host_instructions.go | 2 +-
.../nova/plugins/filters/filter_instance_group_affinity.go | 2 +-
.../plugins/filters/filter_instance_group_anti_affinity.go | 2 +-
.../nova/plugins/filters/filter_live_migratable.go | 2 +-
.../nova/plugins/filters/filter_live_migratable_test.go | 6 +++---
.../decisions/nova/plugins/filters/filter_maintenance.go | 2 +-
.../nova/plugins/filters/filter_packed_virtqueue.go | 2 +-
.../nova/plugins/filters/filter_requested_destination.go | 2 +-
.../plugins/filters/filter_requested_destination_test.go | 4 ++--
.../nova/plugins/filters/filter_status_conditions.go | 2 +-
.../plugins/weighers/vmware_anti_affinity_noisy_projects.go | 4 ++--
.../weighers/vmware_avoid_long_term_contended_hosts.go | 4 ++--
.../weighers/vmware_avoid_short_term_contended_hosts.go | 4 ++--
.../plugins/weighers/vmware_general_purpose_balancing.go | 4 ++--
.../nova/plugins/weighers/vmware_hana_binpacking.go | 4 ++--
.../scheduling/decisions/pods/plugins/weighers/binpack.go | 2 +-
internal/scheduling/lib/step.go | 4 ++--
25 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index 539988c3a..fced9696c 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -44,8 +44,8 @@ func (o NetappCPUUsageBalancingStepOpts) Validate() error {
// Step to balance CPU usage by avoiding highly used storage pools.
type NetappCPUUsageBalancingStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- scheduling.BaseStep[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ scheduling.Weigher[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index 215a0f6b3..b3e5f832a 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -14,7 +14,7 @@ import (
)
type FilterAllowedProjectsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Lock certain hosts for certain projects, based on the hypervisor spec.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index ea0c86b7f..b76258229 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -15,7 +15,7 @@ import (
)
type FilterCapabilitiesStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Get the provided capabilities of a hypervisor resource in the format Nova expects.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index 744edfb62..ccdd6891a 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -13,7 +13,7 @@ import (
)
type FilterCorrectAZStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Only get hosts in the requested az.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index 7385063dd..db175842e 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -28,7 +28,7 @@ func (opts FilterExternalCustomerStepOpts) Validate() error {
}
type FilterExternalCustomerStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
}
// Prefix-match the domain name for external customer domains and filter out hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 04918542c..764859bdd 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -14,7 +14,7 @@ import (
)
type FilterHasAcceleratorsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with accelerators.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index 62d0f5968..892c77416 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -23,7 +23,7 @@ type FilterHasEnoughCapacityOpts struct {
func (FilterHasEnoughCapacityOpts) Validate() error { return nil }
type FilterHasEnoughCapacity struct {
- lib.BaseStep[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
+ lib.Filter[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
}
// Filter hosts that don't have enough capacity to run the requested flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index 14cf927a8..9d7da99fc 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -15,7 +15,7 @@ import (
)
type FilterHasRequestedTraits struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts that do not have the requested traits given by the extra spec:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index b20f041f8..ce121d740 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -12,7 +12,7 @@ import (
)
type FilterHostInstructionsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts based on instructions given in the request spec. Supported are:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index f75abc596..cf9c8ed3b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -12,7 +12,7 @@ import (
)
type FilterInstanceGroupAffinityStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts in spec.instance_group.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index 78fbb4c84..48864d189 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -14,7 +14,7 @@ import (
)
type FilterInstanceGroupAntiAffinityStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts not in spec_obj.instance_group but only until
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index 7554099be..0186adb39 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -15,7 +15,7 @@ import (
)
type FilterLiveMigratableStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check if the encountered request spec is a live migration.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index c719a3eb6..aed728fae 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -727,7 +727,7 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -812,7 +812,7 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -856,7 +856,7 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index 15dc4eafd..4a8ca01f7 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -13,7 +13,7 @@ import (
)
type FilterMaintenanceStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index 836ffd05e..c78d9325a 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -14,7 +14,7 @@ import (
)
type FilterPackedVirtqueueStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with packed virtqueues.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index 9a7ab3462..7b80186b8 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -14,7 +14,7 @@ import (
)
type FilterRequestedDestinationStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If `requested_destination` is set in the request spec, filter hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index ca1faaa07..18db483a0 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -494,7 +494,7 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -575,7 +575,7 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index 0ea1f037b..52c00eebb 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -15,7 +15,7 @@ import (
)
type FilterStatusConditionsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that all status conditions meet the expected values, for example,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 5dae93259..5a79b1371 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -35,8 +35,8 @@ func (o VMwareAntiAffinityNoisyProjectsStepOpts) Validate() error {
// Step to avoid noisy projects by downvoting the hosts they are running on.
type VMwareAntiAffinityNoisyProjectsStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ lib.Weigher[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
}
// Downvote the hosts a project is currently running on if it's noisy.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index f06aa49c2..40e104d04 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -44,8 +44,8 @@ func (o VMwareAvoidLongTermContendedHostsStepOpts) Validate() error {
// Step to avoid long term contended hosts by downvoting them.
type VMwareAvoidLongTermContendedHostsStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ lib.Weigher[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index f584765b6..d6f7871b5 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -44,8 +44,8 @@ func (o VMwareAvoidShortTermContendedHostsStepOpts) Validate() error {
// Step to avoid recently contended hosts by downvoting them.
type VMwareAvoidShortTermContendedHostsStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ lib.Weigher[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index ef97caae5..961ce2bd7 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -34,8 +34,8 @@ func (o VMwareGeneralPurposeBalancingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareGeneralPurposeBalancingStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- scheduling.BaseStep[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ scheduling.Weigher[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
}
// Pack VMs on hosts based on their flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index 962b639c7..d8e3495b9 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -34,8 +34,8 @@ func (o VMwareHanaBinpackingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareHanaBinpackingStep struct {
- // BaseStep is a helper struct that provides common functionality for all steps.
- scheduling.BaseStep[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
+ // Weigher is a helper struct that provides common functionality for all steps.
+ scheduling.Weigher[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
}
// Pack VMs on hosts based on their flavor.
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index 80ae3b7d8..8c1e19bde 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -28,7 +28,7 @@ func (o BinpackingStepOpts) Validate() error {
}
type BinpackingStep struct {
- scheduling.BaseStep[api.PodPipelineRequest, BinpackingStepOpts]
+ scheduling.Weigher[api.PodPipelineRequest, BinpackingStepOpts]
}
func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*scheduling.StepResult, error) {
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index f68fb51d5..5e533a261 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -54,11 +54,11 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
// Common base implementation of a weigher step.
// Functionally identical to BaseStep, but used for clarity.
-type Weigher[RequestType PipelineRequest, Opts StepOpts] struct{ BaseStep[RequestType, Opts] }
+type Weigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts]
// Common base implementation of a filter step.
// Functionally identical to BaseStep, but used for clarity.
-type Filter[RequestType PipelineRequest, Opts StepOpts] struct{ BaseStep[RequestType, Opts] }
+type Filter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts]
// Init the step with the database and options.
func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
From 3c202baeac0f715b24d3a9b2b45bd74e94f5c856 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Wed, 21 Jan 2026 17:18:09 +0100
Subject: [PATCH 04/41] WIP: I want to separate filters and weighers in code
---
api/v1alpha1/pipeline_types.go | 44 +++++++++++++------
api/v1alpha1/zz_generated.deepcopy.go | 22 ++--------
.../cinder/pipeline_controller_test.go | 4 +-
.../machines/pipeline_controller_test.go | 8 +---
.../manila/pipeline_controller_test.go | 12 ++---
.../nova/pipeline_controller_test.go | 20 ++++-----
.../pods/pipeline_controller_test.go | 4 +-
.../descheduling/nova/monitor_test.go | 14 +++---
.../nova/pipeline_controller_test.go | 4 +-
.../descheduling/nova/pipeline_test.go | 10 ++---
.../descheduling/nova/plugins/base_test.go | 10 ++---
.../lib/pipeline_controller_test.go | 30 ++++---------
12 files changed, 79 insertions(+), 103 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index cf965111f..45ab2491a 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -13,7 +13,18 @@ import (
// valid candidates. Filters are run before weighers are applied, as
// part of a filter-weigher scheduling pipeline.
type FilterSpec struct {
- StepSpec `json:",inline"` // Embed common step spec fields.
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the step that can be used by its implementation.
+ // +kubebuilder:validation:Optional
+ Opts runtime.RawExtension `json:"opts,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
// Filters are not allowed to depend on knowledges, as knowledges can
// be outdated leading to invalid filtering decisions.
@@ -23,7 +34,18 @@ type FilterSpec struct {
// making some hosts more preferable than others. Weighers are run
// after filters are applied, as part of a filter-weigher scheduling pipeline.
type WeigherSpec struct {
- StepSpec `json:",inline"` // Embed common step spec fields.
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the step that can be used by its implementation.
+ // +kubebuilder:validation:Optional
+ Opts runtime.RawExtension `json:"opts,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
// Knowledges this step depends on to be ready.
//
@@ -37,17 +59,6 @@ type WeigherSpec struct {
// These detectors are run after weighers are applied, as part of a
// descheduler scheduling pipeline.
type DetectorSpec struct {
- StepSpec `json:",inline"` // Embed common step spec fields.
-
- // Knowledges this step depends on to be ready.
- //
- // Detectors can depend on knowledges as they don't ensure valid placements
- // and therefore are not on the critical path.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
-}
-
-type StepSpec struct {
// The name of the scheduler step in the cortex implementation.
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
@@ -60,6 +71,13 @@ type StepSpec struct {
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
+
+ // Knowledges this step depends on to be ready.
+ //
+ // Detectors can depend on knowledges as they don't ensure valid placements
+ // and therefore are not on the critical path.
+ // +kubebuilder:validation:Optional
+ Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
type PipelineType string
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index b75142551..785455a08 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -428,7 +428,7 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
*out = *in
- in.StepSpec.DeepCopyInto(&out.StepSpec)
+ in.Opts.DeepCopyInto(&out.Opts)
if in.Knowledges != nil {
in, out := &in.Knowledges, &out.Knowledges
*out = make([]v1.ObjectReference, len(*in))
@@ -449,7 +449,7 @@ func (in *DetectorSpec) DeepCopy() *DetectorSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
*out = *in
- in.StepSpec.DeepCopyInto(&out.StepSpec)
+ in.Opts.DeepCopyInto(&out.Opts)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
@@ -1134,26 +1134,10 @@ func (in *StepResult) DeepCopy() *StepResult {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepSpec) DeepCopyInto(out *StepSpec) {
- *out = *in
- in.Opts.DeepCopyInto(&out.Opts)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
-func (in *StepSpec) DeepCopy() *StepSpec {
- if in == nil {
- return nil
- }
- out := new(StepSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
*out = *in
- in.StepSpec.DeepCopyInto(&out.StepSpec)
+ in.Opts.DeepCopyInto(&out.Opts)
if in.Knowledges != nil {
in, out := &in.Knowledges, &out.Knowledges
*out = make([]v1.ObjectReference, len(*in))
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index a8fbc9598..2b7cda1bf 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -490,12 +490,12 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "test-plugin"},
+ Name: "test-plugin",
},
},
weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "test-plugin"},
+ Name: "test-plugin",
},
},
expectError: true, // Expected because test-plugin is not in supportedSteps
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 0a2f8b00b..e824fee72 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -224,18 +224,14 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
{
name: "noop step",
weighers: []v1alpha1.WeigherSpec{
- {
- StepSpec: v1alpha1.StepSpec{Name: "noop"},
- },
+ {Name: "noop"},
},
expectError: false,
},
{
name: "unsupported step",
filters: []v1alpha1.FilterSpec{
- {
- StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
- },
+ {Name: "unsupported"},
},
expectError: true,
},
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index b404e0a89..d280f0e05 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -485,11 +485,9 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "supported netapp step",
weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "netapp_cpu_usage_balancing",
- Opts: runtime.RawExtension{
- Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
- },
+ Name: "netapp_cpu_usage_balancing",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
},
},
},
@@ -499,9 +497,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "unsupported-plugin",
- },
+ Name: "unsupported-plugin",
},
},
expectError: true,
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index dbcf3929b..b02face53 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -278,7 +278,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "supported step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "filter_status_conditions"},
+ Name: "filter_status_conditions",
},
},
expectError: false,
@@ -287,7 +287,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "unsupported-plugin"},
+ Name: "unsupported-plugin",
},
},
expectError: true,
@@ -296,11 +296,9 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "step with scoping options",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
- Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
- },
+ Name: "filter_status_conditions",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
},
},
},
@@ -310,11 +308,9 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "step with invalid scoping options",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
- Raw: []byte(`invalid json`),
- },
+ Name: "filter_status_conditions",
+ Opts: runtime.RawExtension{
+ Raw: []byte(`invalid json`),
},
},
},
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index fbe493f4a..4d93a1720 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -200,7 +200,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "noop step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "noop"},
+ Name: "noop",
},
},
expectError: false,
@@ -209,7 +209,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
+ Name: "unsupported",
},
},
expectError: true,
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index 5776acad0..ed7416848 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -97,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{StepSpec: v1alpha1.StepSpec{Name: "test-step"}}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index e110e3914..86a254d93 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -48,7 +48,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
name: "successful pipeline initialization",
steps: []v1alpha1.DetectorSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "mock-step"},
+ Name: "mock-step",
},
},
expectError: false,
@@ -57,7 +57,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
name: "unsupported step",
steps: []v1alpha1.DetectorSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "unsupported"},
+ Name: "unsupported",
},
},
expectError: true,
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index 476fc2de9..d006f2b8f 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -52,7 +52,7 @@ func TestPipeline_Init(t *testing.T) {
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.DetectorSpec{{
- StepSpec: v1alpha1.StepSpec{Name: "test-step"},
+ Name: "test-step",
}},
expectedSteps: 1,
},
@@ -62,7 +62,7 @@ func TestPipeline_Init(t *testing.T) {
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.DetectorSpec{{
- StepSpec: v1alpha1.StepSpec{Name: "unsupported-step"},
+ Name: "unsupported-step",
}},
expectedError: true,
},
@@ -72,7 +72,7 @@ func TestPipeline_Init(t *testing.T) {
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
confedSteps: []v1alpha1.DetectorSpec{{
- StepSpec: v1alpha1.StepSpec{Name: "failing-step"},
+ Name: "failing-step",
}},
expectedError: true,
},
@@ -84,10 +84,10 @@ func TestPipeline_Init(t *testing.T) {
},
confedSteps: []v1alpha1.DetectorSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "step1"},
+ Name: "step1",
},
{
- StepSpec: v1alpha1.StepSpec{Name: "step2"},
+ Name: "step2",
},
},
expectedSteps: 2,
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index 4f01dfc32..a0f581c0a 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -24,12 +24,10 @@ func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
- StepSpec: v1alpha1.StepSpec{
- Opts: runtime.RawExtension{Raw: []byte(`{
- "option1": "value1",
- "option2": 2
- }`)},
- },
+ Opts: runtime.RawExtension{Raw: []byte(`{
+ "option1": "value1",
+ "option2": 2
+ }`)},
})
if err != nil {
t.Fatalf("expected no error, got %v", err)
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index e2706b8de..7a85999b6 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -204,12 +204,12 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Filters: []v1alpha1.FilterSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "test-filter"},
+ Name: "test-filter",
},
},
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "test-weigher"},
+ Name: "test-weigher",
},
},
},
@@ -243,7 +243,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{Name: "test-weigher"},
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "missing-knowledge", Namespace: "default"},
},
@@ -669,9 +669,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -688,9 +686,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "other-knowledge", Namespace: "default"},
},
@@ -723,9 +719,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -802,9 +796,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -955,9 +947,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
@@ -1025,9 +1015,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
Weighers: []v1alpha1.WeigherSpec{
{
- StepSpec: v1alpha1.StepSpec{
- Name: "test-weigher",
- },
+ Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
{Name: "test-knowledge", Namespace: "default"},
},
From 9672580b0c3b47569214558b4cc6003ef381ac46 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 09:53:16 +0100
Subject: [PATCH 05/41] Split filter and weigher in implementation
---
api/v1alpha1/pipeline_types.go | 22 +++++++++
.../decisions/cinder/supported_steps.go | 10 ++--
.../scheduling/decisions/machines/noop.go | 2 +-
.../machines/pipeline_controller_test.go | 2 +-
.../decisions/machines/supported_steps.go | 12 +++--
.../weighers/netapp_cpu_usage_balancing.go | 10 ++--
.../decisions/manila/supported_steps.go | 10 ++--
.../filters/filter_allowed_projects.go | 2 +-
.../plugins/filters/filter_capabilities.go | 2 +-
.../nova/plugins/filters/filter_correct_az.go | 2 +-
.../filters/filter_external_customer.go | 2 +-
.../filters/filter_has_accelerators.go | 2 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filters/filter_has_requested_traits.go | 2 +-
.../filters/filter_host_instructions.go | 2 +-
.../filters/filter_instance_group_affinity.go | 2 +-
.../filter_instance_group_anti_affinity.go | 2 +-
.../plugins/filters/filter_live_migratable.go | 2 +-
.../filters/filter_live_migratable_test.go | 6 +--
.../plugins/filters/filter_maintenance.go | 2 +-
.../filters/filter_packed_virtqueue.go | 2 +-
.../filters/filter_requested_destination.go | 2 +-
.../filter_requested_destination_test.go | 4 +-
.../filters/filter_status_conditions.go | 2 +-
.../vmware_anti_affinity_noisy_projects.go | 2 +-
.../vmware_avoid_long_term_contended_hosts.go | 2 +-
...vmware_avoid_short_term_contended_hosts.go | 2 +-
.../vmware_general_purpose_balancing.go | 8 ++--
.../weighers/vmware_hana_binpacking.go | 8 ++--
.../decisions/nova/supported_steps.go | 48 ++++++++++---------
.../plugins/filters/filter_node_affinity.go | 2 +-
.../plugins/filters/filter_node_available.go | 2 +-
.../plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_noop.go | 2 +-
.../pods/plugins/filters/filter_taint.go | 2 +-
.../pods/plugins/weighers/binpack.go | 6 +--
.../decisions/pods/supported_steps.go | 18 +++----
internal/scheduling/lib/pipeline.go | 28 +++++------
internal/scheduling/lib/pipeline_test.go | 12 ++---
internal/scheduling/lib/step.go | 24 ++++++----
internal/scheduling/lib/step_monitor.go | 24 +++++-----
internal/scheduling/lib/step_monitor_test.go | 6 ++-
internal/scheduling/lib/step_test.go | 8 ++--
internal/scheduling/lib/weigher_validation.go | 6 +--
.../scheduling/lib/weigher_validation_test.go | 6 ++-
45 files changed, 184 insertions(+), 142 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 45ab2491a..449ec0309 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -9,6 +9,16 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// Step as part of a cortex pipeline.
+type Step interface {
+ // Every step must have options so the pipeline can configure it.
+ GetOpts() runtime.RawExtension
+ // Every step must have a name so the pipeline can identify it.
+ GetName() string
+ // Every step can have an optional description.
+ GetDescription() string
+}
+
// Filters remove host candidates from an initial set, leaving
// valid candidates. Filters are run before weighers are applied, as
// part of a filter-weigher scheduling pipeline.
@@ -30,6 +40,10 @@ type FilterSpec struct {
// be outdated leading to invalid filtering decisions.
}
+func (f FilterSpec) GetOpts() runtime.RawExtension { return f.Opts }
+func (f FilterSpec) GetName() string { return f.Name }
+func (f FilterSpec) GetDescription() string { return f.Description }
+
// Weighers assign weights to the remaining host candidates after filtering,
// making some hosts more preferable than others. Weighers are run
// after filters are applied, as part of a filter-weigher scheduling pipeline.
@@ -55,6 +69,10 @@ type WeigherSpec struct {
Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
+func (w WeigherSpec) GetOpts() runtime.RawExtension { return w.Opts }
+func (w WeigherSpec) GetName() string { return w.Name }
+func (w WeigherSpec) GetDescription() string { return w.Description }
+
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied, as part of a
// descheduler scheduling pipeline.
@@ -80,6 +98,10 @@ type DetectorSpec struct {
Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
+func (d DetectorSpec) GetOpts() runtime.RawExtension { return d.Opts }
+func (d DetectorSpec) GetName() string { return d.Name }
+func (d DetectorSpec) GetDescription() string { return d.Description }
+
type PipelineType string
const (
diff --git a/internal/scheduling/decisions/cinder/supported_steps.go b/internal/scheduling/decisions/cinder/supported_steps.go
index c10ed46a4..90e5dc95d 100644
--- a/internal/scheduling/decisions/cinder/supported_steps.go
+++ b/internal/scheduling/decisions/cinder/supported_steps.go
@@ -8,10 +8,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type CinderStep = lib.Step[api.ExternalSchedulerRequest]
+type CinderWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the cinder scheduling.
-var supportedWeighers = map[string]func() CinderStep{}
+var supportedWeighers = map[string]func() CinderWeigher{}
-// Configuration of filters supported by the machine scheduling.
-var supportedFilters = map[string]func() CinderStep{}
+type CinderFilter = lib.Filter[api.ExternalSchedulerRequest]
+
+// Configuration of filters supported by the cinder scheduling.
+var supportedFilters = map[string]func() CinderFilter{}
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index 3b0104aa6..6dfa7911b 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -15,7 +15,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index e824fee72..157b8ac13 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -223,7 +223,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "noop step",
- weighers: []v1alpha1.WeigherSpec{
+ filters: []v1alpha1.FilterSpec{
{Name: "noop"},
},
expectError: false,
diff --git a/internal/scheduling/decisions/machines/supported_steps.go b/internal/scheduling/decisions/machines/supported_steps.go
index 9400ee922..4e04d64d1 100644
--- a/internal/scheduling/decisions/machines/supported_steps.go
+++ b/internal/scheduling/decisions/machines/supported_steps.go
@@ -8,12 +8,14 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type MachineStep = lib.Step[ironcore.MachinePipelineRequest]
+type MachineWeigher = lib.Weigher[ironcore.MachinePipelineRequest]
// Configuration of weighers supported by the machine scheduling.
-var supportedWeighers = map[string]func() MachineStep{
- "noop": func() MachineStep { return &NoopFilter{} },
-}
+var supportedWeighers = map[string]func() MachineWeigher{}
+
+type MachineFilter = lib.Filter[ironcore.MachinePipelineRequest]
// Configuration of filters supported by the machine scheduling.
-var supportedFilters = map[string]func() MachineStep{}
+var supportedFilters = map[string]func() MachineFilter{
+ "noop": func() MachineFilter { return &NoopFilter{} },
+}
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index fced9696c..862e95153 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -11,7 +11,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/delegation/manila"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/storage"
- scheduling "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -45,11 +45,11 @@ func (o NetappCPUUsageBalancingStepOpts) Validate() error {
// Step to balance CPU usage by avoiding highly used storage pools.
type NetappCPUUsageBalancingStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- scheduling.Weigher[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
}
// Downvote hosts that are highly contended.
-func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*scheduling.StepResult, error) {
+func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.PrepareResult(request)
result.Statistics["avg cpu contention"] = s.PrepareStats(request, "%")
result.Statistics["max cpu contention"] = s.PrepareStats(request, "%")
@@ -74,14 +74,14 @@ func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.Ext
if _, ok := result.Activations[usage.StoragePoolName]; !ok {
continue
}
- activationAvg := scheduling.MinMaxScale(
+ activationAvg := lib.MinMaxScale(
usage.AvgCPUUsagePct,
s.Options.AvgCPUUsageLowerBound,
s.Options.AvgCPUUsageUpperBound,
s.Options.AvgCPUUsageActivationLowerBound,
s.Options.AvgCPUUsageActivationUpperBound,
)
- activationMax := scheduling.MinMaxScale(
+ activationMax := lib.MinMaxScale(
usage.MaxCPUUsagePct,
s.Options.MaxCPUUsageLowerBound,
s.Options.MaxCPUUsageUpperBound,
diff --git a/internal/scheduling/decisions/manila/supported_steps.go b/internal/scheduling/decisions/manila/supported_steps.go
index 4530db6a7..fca819711 100644
--- a/internal/scheduling/decisions/manila/supported_steps.go
+++ b/internal/scheduling/decisions/manila/supported_steps.go
@@ -9,12 +9,14 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type ManilaStep = lib.Step[api.ExternalSchedulerRequest]
+type ManilaFilter = lib.Filter[api.ExternalSchedulerRequest]
// Configuration of filters supported by the manila scheduler.
-var supportedFilters = map[string]func() ManilaStep{}
+var supportedFilters = map[string]func() ManilaFilter{}
+
+type ManilaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the manila scheduler.
-var supportedWeighers = map[string]func() ManilaStep{
- "netapp_cpu_usage_balancing": func() ManilaStep { return &weighers.NetappCPUUsageBalancingStep{} },
+var supportedWeighers = map[string]func() ManilaWeigher{
+ "netapp_cpu_usage_balancing": func() ManilaWeigher { return &weighers.NetappCPUUsageBalancingStep{} },
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index b3e5f832a..37c5e439d 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -14,7 +14,7 @@ import (
)
type FilterAllowedProjectsStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Lock certain hosts for certain projects, based on the hypervisor spec.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index b76258229..6a9c8bf83 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -15,7 +15,7 @@ import (
)
type FilterCapabilitiesStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Get the provided capabilities of a hypervisor resource in the format Nova expects.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index ccdd6891a..9490f7cfb 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -13,7 +13,7 @@ import (
)
type FilterCorrectAZStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Only get hosts in the requested az.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index db175842e..8803a05db 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -28,7 +28,7 @@ func (opts FilterExternalCustomerStepOpts) Validate() error {
}
type FilterExternalCustomerStep struct {
- lib.Filter[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
}
// Prefix-match the domain name for external customer domains and filter out hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 764859bdd..24de1e726 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -14,7 +14,7 @@ import (
)
type FilterHasAcceleratorsStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with accelerators.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index 892c77416..838afbe4b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -23,7 +23,7 @@ type FilterHasEnoughCapacityOpts struct {
func (FilterHasEnoughCapacityOpts) Validate() error { return nil }
type FilterHasEnoughCapacity struct {
- lib.Filter[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
}
// Filter hosts that don't have enough capacity to run the requested flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index 9d7da99fc..b1681f6b6 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -15,7 +15,7 @@ import (
)
type FilterHasRequestedTraits struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts that do not have the requested traits given by the extra spec:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index ce121d740..8f21bddc4 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -12,7 +12,7 @@ import (
)
type FilterHostInstructionsStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts based on instructions given in the request spec. Supported are:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index cf9c8ed3b..a6b6f48b2 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -12,7 +12,7 @@ import (
)
type FilterInstanceGroupAffinityStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts in spec.instance_group.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index 48864d189..b6a936074 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -14,7 +14,7 @@ import (
)
type FilterInstanceGroupAntiAffinityStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts not in spec_obj.instance_group but only until
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index 0186adb39..52b8cb919 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -15,7 +15,7 @@ import (
)
type FilterLiveMigratableStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check if the encountered request spec is a live migration.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index aed728fae..edb33c5c0 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -727,7 +727,7 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- Filter: lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -812,7 +812,7 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- Filter: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -856,7 +856,7 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- Filter: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index 4a8ca01f7..57136d451 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -13,7 +13,7 @@ import (
)
type FilterMaintenanceStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index c78d9325a..4bd6b8caf 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -14,7 +14,7 @@ import (
)
type FilterPackedVirtqueueStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with packed virtqueues.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index 7b80186b8..17d2339d8 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -14,7 +14,7 @@ import (
)
type FilterRequestedDestinationStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If `requested_destination` is set in the request spec, filter hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index 18db483a0..3ba008214 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -494,7 +494,7 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- Filter: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -575,7 +575,7 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- Filter: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index 52c00eebb..40ee90a4e 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -15,7 +15,7 @@ import (
)
type FilterStatusConditionsStep struct {
- lib.Filter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that all status conditions meet the expected values, for example,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 5a79b1371..4ee355f58 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -36,7 +36,7 @@ func (o VMwareAntiAffinityNoisyProjectsStepOpts) Validate() error {
// Step to avoid noisy projects by downvoting the hosts they are running on.
type VMwareAntiAffinityNoisyProjectsStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- lib.Weigher[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
}
// Downvote the hosts a project is currently running on if it's noisy.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index 40e104d04..aca9380ec 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -45,7 +45,7 @@ func (o VMwareAvoidLongTermContendedHostsStepOpts) Validate() error {
// Step to avoid long term contended hosts by downvoting them.
type VMwareAvoidLongTermContendedHostsStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- lib.Weigher[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index d6f7871b5..65d75fb91 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -45,7 +45,7 @@ func (o VMwareAvoidShortTermContendedHostsStepOpts) Validate() error {
// Step to avoid recently contended hosts by downvoting them.
type VMwareAvoidShortTermContendedHostsStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- lib.Weigher[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index 961ce2bd7..6973b749c 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -12,7 +12,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- scheduling "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -35,11 +35,11 @@ func (o VMwareGeneralPurposeBalancingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareGeneralPurposeBalancingStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- scheduling.Weigher[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
}
// Pack VMs on hosts based on their flavor.
-func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*scheduling.StepResult, error) {
+func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.PrepareResult(request)
// Don't execute the step for non-hana flavors.
if strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
@@ -74,7 +74,7 @@ func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request a
result.
Statistics["ram utilized"].
Subjects[hostUtilization.ComputeHost] = hostUtilization.RAMUtilizedPct
- result.Activations[hostUtilization.ComputeHost] = scheduling.MinMaxScale(
+ result.Activations[hostUtilization.ComputeHost] = lib.MinMaxScale(
hostUtilization.RAMUtilizedPct,
s.Options.RAMUtilizedLowerBoundPct,
s.Options.RAMUtilizedUpperBoundPct,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index d8e3495b9..7993841c6 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -12,7 +12,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- scheduling "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -35,11 +35,11 @@ func (o VMwareHanaBinpackingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareHanaBinpackingStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- scheduling.Weigher[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
}
// Pack VMs on hosts based on their flavor.
-func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*scheduling.StepResult, error) {
+func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.PrepareResult(request)
// Don't execute the step for non-hana flavors.
if !strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
@@ -124,7 +124,7 @@ func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.Extern
if after < s.Options.RAMUtilizedAfterLowerBoundPct || after > s.Options.RAMUtilizedAfterUpperBoundPct {
result.Activations[hostUtilization.ComputeHost] = s.NoEffect()
} else {
- result.Activations[hostUtilization.ComputeHost] = scheduling.MinMaxScale(
+ result.Activations[hostUtilization.ComputeHost] = lib.MinMaxScale(
after,
s.Options.RAMUtilizedAfterLowerBoundPct,
s.Options.RAMUtilizedAfterUpperBoundPct,
diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_steps.go
index 45699744c..4821e7ba5 100644
--- a/internal/scheduling/decisions/nova/supported_steps.go
+++ b/internal/scheduling/decisions/nova/supported_steps.go
@@ -10,32 +10,34 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type NovaStep = lib.Step[api.ExternalSchedulerRequest]
+type NovaFilter = lib.Filter[api.ExternalSchedulerRequest]
// Configuration of filters supported by the nova scheduler.
-var supportedFilters = map[string]func() NovaStep{
- "filter_has_accelerators": func() NovaStep { return &filters.FilterHasAcceleratorsStep{} },
- "filter_correct_az": func() NovaStep { return &filters.FilterCorrectAZStep{} },
- "filter_status_conditions": func() NovaStep { return &filters.FilterStatusConditionsStep{} },
- "filter_maintenance": func() NovaStep { return &filters.FilterMaintenanceStep{} },
- "filter_packed_virtqueue": func() NovaStep { return &filters.FilterPackedVirtqueueStep{} },
- "filter_external_customer": func() NovaStep { return &filters.FilterExternalCustomerStep{} },
- "filter_allowed_projects": func() NovaStep { return &filters.FilterAllowedProjectsStep{} },
- "filter_capabilities": func() NovaStep { return &filters.FilterCapabilitiesStep{} },
- "filter_has_requested_traits": func() NovaStep { return &filters.FilterHasRequestedTraits{} },
- "filter_has_enough_capacity": func() NovaStep { return &filters.FilterHasEnoughCapacity{} },
- "filter_host_instructions": func() NovaStep { return &filters.FilterHostInstructionsStep{} },
- "filter_instance_group_affinity": func() NovaStep { return &filters.FilterInstanceGroupAffinityStep{} },
- "filter_instance_group_anti_affinity": func() NovaStep { return &filters.FilterInstanceGroupAntiAffinityStep{} },
- "filter_live_migratable": func() NovaStep { return &filters.FilterLiveMigratableStep{} },
- "filter_requested_destination": func() NovaStep { return &filters.FilterRequestedDestinationStep{} },
+var supportedFilters = map[string]func() NovaFilter{
+ "filter_has_accelerators": func() NovaFilter { return &filters.FilterHasAcceleratorsStep{} },
+ "filter_correct_az": func() NovaFilter { return &filters.FilterCorrectAZStep{} },
+ "filter_status_conditions": func() NovaFilter { return &filters.FilterStatusConditionsStep{} },
+ "filter_maintenance": func() NovaFilter { return &filters.FilterMaintenanceStep{} },
+ "filter_packed_virtqueue": func() NovaFilter { return &filters.FilterPackedVirtqueueStep{} },
+ "filter_external_customer": func() NovaFilter { return &filters.FilterExternalCustomerStep{} },
+ "filter_allowed_projects": func() NovaFilter { return &filters.FilterAllowedProjectsStep{} },
+ "filter_capabilities": func() NovaFilter { return &filters.FilterCapabilitiesStep{} },
+ "filter_has_requested_traits": func() NovaFilter { return &filters.FilterHasRequestedTraits{} },
+ "filter_has_enough_capacity": func() NovaFilter { return &filters.FilterHasEnoughCapacity{} },
+ "filter_host_instructions": func() NovaFilter { return &filters.FilterHostInstructionsStep{} },
+ "filter_instance_group_affinity": func() NovaFilter { return &filters.FilterInstanceGroupAffinityStep{} },
+ "filter_instance_group_anti_affinity": func() NovaFilter { return &filters.FilterInstanceGroupAntiAffinityStep{} },
+ "filter_live_migratable": func() NovaFilter { return &filters.FilterLiveMigratableStep{} },
+ "filter_requested_destination": func() NovaFilter { return &filters.FilterRequestedDestinationStep{} },
}
+type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
+
// Configuration of weighers supported by the nova scheduler.
-var supportedWeighers = map[string]func() NovaStep{
- "vmware_anti_affinity_noisy_projects": func() NovaStep { return &weighers.VMwareAntiAffinityNoisyProjectsStep{} },
- "vmware_avoid_long_term_contended_hosts": func() NovaStep { return &weighers.VMwareAvoidLongTermContendedHostsStep{} },
- "vmware_avoid_short_term_contended_hosts": func() NovaStep { return &weighers.VMwareAvoidShortTermContendedHostsStep{} },
- "vmware_hana_binpacking": func() NovaStep { return &weighers.VMwareHanaBinpackingStep{} },
- "vmware_general_purpose_balancing": func() NovaStep { return &weighers.VMwareGeneralPurposeBalancingStep{} },
+var supportedWeighers = map[string]func() NovaWeigher{
+ "vmware_anti_affinity_noisy_projects": func() NovaWeigher { return &weighers.VMwareAntiAffinityNoisyProjectsStep{} },
+ "vmware_avoid_long_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidLongTermContendedHostsStep{} },
+ "vmware_avoid_short_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidShortTermContendedHostsStep{} },
+ "vmware_hana_binpacking": func() NovaWeigher { return &weighers.VMwareHanaBinpackingStep{} },
+ "vmware_general_purpose_balancing": func() NovaWeigher { return &weighers.VMwareGeneralPurposeBalancingStep{} },
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index 265bffa24..acacc6ea6 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -19,7 +19,7 @@ type NodeAffinityFilter struct {
Alias string
}
-func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index 45ae98067..c668e5f0a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -18,7 +18,7 @@ type NodeAvailableFilter struct {
Alias string
}
-func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 44d185580..70e897b6a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -19,7 +19,7 @@ type NodeCapacityFilter struct {
Alias string
}
-func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 3cd328a50..08fbf1cd4 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -18,7 +18,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 82135b161..697c41466 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -18,7 +18,7 @@ type TaintFilter struct {
Alias string
}
-func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index 8c1e19bde..1e4defe8d 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -10,7 +10,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/delegation/pods"
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/helpers"
- scheduling "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
corev1 "k8s.io/api/core/v1"
)
@@ -28,10 +28,10 @@ func (o BinpackingStepOpts) Validate() error {
}
type BinpackingStep struct {
- scheduling.Weigher[api.PodPipelineRequest, BinpackingStepOpts]
+ lib.BaseWeigher[api.PodPipelineRequest, BinpackingStepOpts]
}
-func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*scheduling.StepResult, error) {
+func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.StepResult, error) {
result := s.PrepareResult(request)
podResources := helpers.GetPodResourceRequests(request.Pod)
diff --git a/internal/scheduling/decisions/pods/supported_steps.go b/internal/scheduling/decisions/pods/supported_steps.go
index cbb27c4f4..43c8f1ac2 100644
--- a/internal/scheduling/decisions/pods/supported_steps.go
+++ b/internal/scheduling/decisions/pods/supported_steps.go
@@ -10,17 +10,19 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type PodStep = lib.Step[pods.PodPipelineRequest]
+type PodFilter = lib.Filter[pods.PodPipelineRequest]
// Configuration of filters supported by the pods scheduler.
-var supportedFilters = map[string]func() PodStep{
- "noop": func() PodStep { return &filters.NoopFilter{} },
- "taint": func() PodStep { return &filters.TaintFilter{} },
- "nodeaffinity": func() PodStep { return &filters.NodeAffinityFilter{} },
- "nodecapacity": func() PodStep { return &filters.NodeCapacityFilter{} },
+var supportedFilters = map[string]func() PodFilter{
+ "noop": func() PodFilter { return &filters.NoopFilter{} },
+ "taint": func() PodFilter { return &filters.TaintFilter{} },
+ "nodeaffinity": func() PodFilter { return &filters.NodeAffinityFilter{} },
+ "nodecapacity": func() PodFilter { return &filters.NodeCapacityFilter{} },
}
+type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
+
// Configuration of weighers supported by the pods scheduler.
-var supportedWeighers = map[string]func() PodStep{
- "binpack": func() PodStep { return &weighers.BinpackingStep{} },
+var supportedWeighers = map[string]func() PodWeigher{
+ "binpack": func() PodWeigher { return &weighers.BinpackingStep{} },
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 1f10afa47..c0c12c5a0 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -30,30 +30,30 @@ type pipeline[RequestType PipelineRequest] struct {
// The order in which filters are applied, by their step name.
filtersOrder []string
// The filters by their name.
- filters map[string]Step[RequestType]
+ filters map[string]Filter[RequestType]
// The order in which weighers are applied, by their step name.
weighersOrder []string
// The weighers by their name.
- weighers map[string]Step[RequestType]
+ weighers map[string]Weigher[RequestType]
// Monitor to observe the pipeline.
monitor PipelineMonitor
}
-type StepWrapper[RequestType PipelineRequest] func(
+type StepWrapper[RequestType PipelineRequest, SpecType v1alpha1.Step] func(
ctx context.Context,
client client.Client,
- step v1alpha1.StepSpec,
- impl Step[RequestType],
-) (Step[RequestType], error)
+ step SpecType,
+ impl Step[RequestType, SpecType],
+) (Step[RequestType, SpecType], error)
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
- supportedFilters map[string]func() Step[RequestType],
+ supportedFilters map[string]func() Filter[RequestType],
confedFilters []v1alpha1.FilterSpec,
- supportedWeighers map[string]func() Step[RequestType],
+ supportedWeighers map[string]func() Weigher[RequestType],
confedWeighers []v1alpha1.WeigherSpec,
monitor PipelineMonitor,
) (Pipeline[RequestType], error) {
@@ -68,7 +68,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all filters from the configuration.
- filtersByName := make(map[string]Step[RequestType], len(confedFilters))
+ filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
filtersOrder := []string{}
for _, filterConfig := range confedFilters {
slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
@@ -78,8 +78,8 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
return nil, errors.New("unsupported filter name: " + filterConfig.Name)
}
filter := makeFilter()
- filter = monitorStep(ctx, client, filterConfig.StepSpec, filter, pipelineMonitor)
- if err := filter.Init(ctx, client, filterConfig.StepSpec); err != nil {
+ filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
+ if err := filter.Init(ctx, client, filterConfig); err != nil {
return nil, errors.New("failed to initialize filter: " + err.Error())
}
filtersByName[filterConfig.Name] = filter
@@ -88,7 +88,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all weighers from the configuration.
- weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
+ weighersByName := make(map[string]Weigher[RequestType], len(confedWeighers))
weighersOrder := []string{}
for _, weigherConfig := range confedWeighers {
slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
@@ -99,8 +99,8 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
}
weigher := makeWeigher()
weigher = validateWeigher(weigher)
- weigher = monitorStep(ctx, client, weigherConfig.StepSpec, weigher, pipelineMonitor)
- if err := weigher.Init(ctx, client, weigherConfig.StepSpec); err != nil {
+ weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
+ if err := weigher.Init(ctx, client, weigherConfig); err != nil {
return nil, errors.New("failed to initialize pipeline step: " + err.Error())
}
weighersByName[weigherConfig.Name] = weigher
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index f32c5377b..dcb1f4e02 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -18,7 +18,7 @@ type mockFilter struct {
name string
}
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
@@ -36,7 +36,7 @@ type mockWeigher struct {
name string
}
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
return nil
}
@@ -52,13 +52,13 @@ func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &pipeline[mockPipelineRequest]{
- filters: map[string]Step[mockPipelineRequest]{
+ filters: map[string]Filter[mockPipelineRequest]{
"mock_filter": &mockFilter{
name: "mock_filter",
},
},
filtersOrder: []string{"mock_filter"},
- weighers: map[string]Step[mockPipelineRequest]{
+ weighers: map[string]Weigher[mockPipelineRequest]{
"mock_weigher": &mockWeigher{
name: "mock_weigher",
},
@@ -136,7 +136,7 @@ func TestPipeline_NormalizeNovaWeights(t *testing.T) {
func TestPipeline_ApplyStepWeights(t *testing.T) {
p := &pipeline[mockPipelineRequest]{
- weighers: map[string]Step[mockPipelineRequest]{},
+ weighers: map[string]Weigher[mockPipelineRequest]{},
weighersOrder: []string{"step1", "step2"},
}
@@ -214,7 +214,7 @@ func TestPipeline_RunFilters(t *testing.T) {
filtersOrder: []string{
"mock_filter",
},
- filters: map[string]Step[mockPipelineRequest]{
+ filters: map[string]Filter[mockPipelineRequest]{
"mock_filter": mockStep,
},
}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 5e533a261..148224b37 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -30,9 +30,9 @@ type EmptyStepOpts struct{}
func (EmptyStepOpts) Validate() error { return nil }
// Interface for a scheduler step.
-type Step[RequestType PipelineRequest] interface {
+type Step[RequestType PipelineRequest, SpecType v1alpha1.Step] interface {
// Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+ Init(ctx context.Context, client client.Client, step SpecType) error
// Run this step of the scheduling pipeline.
// Return a map of keys to activation values. Important: keys that are
// not in the map are considered as filtered out.
@@ -41,9 +41,15 @@ type Step[RequestType PipelineRequest] interface {
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
+// Step that acts as a weigher in the scheduling pipeline.
+type Weigher[RequestType PipelineRequest] = Step[RequestType, v1alpha1.WeigherSpec]
+
+// Step that acts as a filter in the scheduling pipeline.
+type Filter[RequestType PipelineRequest] = Step[RequestType, v1alpha1.FilterSpec]
+
// Common base for all steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
+type BaseStep[RequestType PipelineRequest, Opts StepOpts, SpecType v1alpha1.Step] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The activation function to use.
@@ -54,15 +60,15 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
// Common base implementation of a weigher step.
// Functionally identical to BaseStep, but used for clarity.
-type Weigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts]
+type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.WeigherSpec]
// Common base implementation of a filter step.
// Functionally identical to BaseStep, but used for clarity.
-type Filter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts]
+type BaseFilter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.FilterSpec]
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- opts := conf.NewRawOptsBytes(step.Opts.Raw)
+func (s *BaseStep[RequestType, Opts, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
+ opts := conf.NewRawOptsBytes(step.GetOpts().Raw)
if err := s.Load(opts); err != nil {
return err
}
@@ -75,7 +81,7 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl
}
// Get a default result (no action) for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts]) PrepareResult(request RequestType) *StepResult {
+func (s *BaseStep[RequestType, Opts, SpecType]) PrepareResult(request RequestType) *StepResult {
activations := make(map[string]float64)
for _, subject := range request.GetSubjects() {
activations[subject] = s.NoEffect()
@@ -85,7 +91,7 @@ func (s *BaseStep[RequestType, Opts]) PrepareResult(request RequestType) *StepRe
}
// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts]) PrepareStats(request PipelineRequest, unit string) StepStatistics {
+func (s *BaseStep[RequestType, Opts, SpecType]) PrepareStats(request PipelineRequest, unit string) StepStatistics {
return StepStatistics{
Unit: unit,
Subjects: make(map[string]float64, len(request.GetSubjects())),
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 2e361c1b3..3f0f7ea2f 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest] struct {
+type StepMonitor[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType]
+ Step Step[RequestType, SpecType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -44,32 +44,32 @@ type StepMonitor[RequestType PipelineRequest] struct {
}
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *StepMonitor[RequestType, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest](
+func monitorStep[RequestType PipelineRequest, SpecType v1alpha1.Step](
_ context.Context,
_ client.Client,
- step v1alpha1.StepSpec,
- impl Step[RequestType],
+ step SpecType,
+ impl Step[RequestType, SpecType],
m PipelineMonitor,
-) *StepMonitor[RequestType] {
+) *StepMonitor[RequestType, SpecType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, step.GetName())
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, step.GetName())
}
- return &StepMonitor[RequestType]{
+ return &StepMonitor[RequestType, SpecType]{
Step: impl,
- stepName: step.Name,
+ stepName: step.GetName(),
pipelineName: m.PipelineName,
runTimer: runTimer,
stepSubjectWeight: m.stepSubjectWeight,
@@ -80,7 +80,7 @@ func monitorStep[RequestType PipelineRequest](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType, SpecType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index c248ec576..fea4e04e7 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -7,6 +7,8 @@ import (
"log/slog"
"os"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
type mockObserver struct {
@@ -21,9 +23,9 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest]{
+ monitor := &StepMonitor[mockPipelineRequest, v1alpha1.WeigherSpec]{
stepName: "mock_step",
- Step: &mockStep[mockPipelineRequest]{
+ Step: &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index 31d335cd3..d9f298fb8 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -11,15 +11,15 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest] struct {
- InitFunc func(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+type mockStep[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
+ InitFunc func(ctx context.Context, client client.Client, step SpecType) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockStep[RequestType, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockStep[RequestType, SpecType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index 629ba7b6b..a86e19ec5 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -15,17 +15,17 @@ import (
// Wrapper for scheduler steps that validates them before/after execution.
type WeigherValidator[RequestType PipelineRequest] struct {
// The wrapped weigher to validate.
- Weigher Step[RequestType]
+ Weigher Weigher[RequestType]
}
// Initialize the wrapped weigher with the database and options.
-func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
slog.Info("scheduler: init validation for step", "name", step.Name)
return s.Weigher.Init(ctx, client, step)
}
// Validate the wrapped weigher with the database and options.
-func validateWeigher[RequestType PipelineRequest](weigher Step[RequestType]) *WeigherValidator[RequestType] {
+func validateWeigher[RequestType PipelineRequest](weigher Weigher[RequestType]) *WeigherValidator[RequestType] {
return &WeigherValidator[RequestType]{Weigher: weigher}
}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index aa6cba851..b97796c3f 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -7,10 +7,12 @@ import (
"log/slog"
"reflect"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
@@ -45,7 +47,7 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
From 4dadf0d1d0ca5ffdb9e7b65331196a37962e7733 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 10:33:45 +0100
Subject: [PATCH 06/41] Rename PrepareResult -> IncludeAllHostsFromRequest
---
.../weighers/netapp_cpu_usage_balancing.go | 2 +-
.../filters/filter_allowed_projects.go | 2 +-
.../plugins/filters/filter_capabilities.go | 2 +-
.../nova/plugins/filters/filter_correct_az.go | 2 +-
.../filters/filter_external_customer.go | 2 +-
.../filters/filter_has_accelerators.go | 2 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filters/filter_has_requested_traits.go | 2 +-
.../filters/filter_host_instructions.go | 2 +-
.../filters/filter_instance_group_affinity.go | 2 +-
.../filter_instance_group_anti_affinity.go | 2 +-
.../plugins/filters/filter_live_migratable.go | 2 +-
.../plugins/filters/filter_maintenance.go | 2 +-
.../filters/filter_packed_virtqueue.go | 2 +-
.../filters/filter_requested_destination.go | 2 +-
.../filters/filter_status_conditions.go | 2 +-
.../vmware_anti_affinity_noisy_projects.go | 2 +-
.../vmware_avoid_long_term_contended_hosts.go | 2 +-
...vmware_avoid_short_term_contended_hosts.go | 2 +-
.../vmware_general_purpose_balancing.go | 2 +-
.../weighers/vmware_hana_binpacking.go | 2 +-
.../pods/plugins/weighers/binpack.go | 2 +-
internal/scheduling/lib/pipeline.go | 8 ++---
internal/scheduling/lib/step.go | 30 +++++++++++++------
internal/scheduling/lib/step_monitor.go | 18 +++++------
internal/scheduling/lib/step_test.go | 8 ++---
26 files changed, 60 insertions(+), 48 deletions(-)
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index 862e95153..97a8d0d86 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -50,7 +50,7 @@ type NetappCPUUsageBalancingStep struct {
// Downvote hosts that are highly contended.
func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
result.Statistics["avg cpu contention"] = s.PrepareStats(request, "%")
result.Statistics["max cpu contention"] = s.PrepareStats(request, "%")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index 37c5e439d..96815c618 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -20,7 +20,7 @@ type FilterAllowedProjectsStep struct {
// Lock certain hosts for certain projects, based on the hypervisor spec.
// Note that hosts without specified projects are still accessible.
func (s *FilterAllowedProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.ProjectID == "" {
traceLog.Info("no project ID in request, skipping filter")
return result, nil
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index 6a9c8bf83..80dfa5b3c 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -45,7 +45,7 @@ func hvToNovaCapabilities(hv hv1.Hypervisor) (map[string]string, error) {
// Check the capabilities of each host and if they match the extra spec provided
// in the request spec flavor.
func (s *FilterCapabilitiesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
requestedCapabilities := request.Spec.Data.Flavor.Data.ExtraSpecs
if len(requestedCapabilities) == 0 {
traceLog.Debug("no flavor extra spec capabilities in request, skipping filter")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index 9490f7cfb..dfcdc9f4b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -18,7 +18,7 @@ type FilterCorrectAZStep struct {
// Only get hosts in the requested az.
func (s *FilterCorrectAZStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.AvailabilityZone == "" {
traceLog.Info("no availability zone requested, skipping filter_correct_az step")
return result, nil
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index 8803a05db..b995be916 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -34,7 +34,7 @@ type FilterExternalCustomerStep struct {
// Prefix-match the domain name for external customer domains and filter out hosts
// that are not intended for external customers.
func (s *FilterExternalCustomerStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
domainName, err := request.Spec.Data.GetSchedulerHintStr("domain_name")
if err != nil {
return nil, err
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 24de1e726..0a5b1339f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -19,7 +19,7 @@ type FilterHasAcceleratorsStep struct {
// If requested, only get hosts with accelerators.
func (s *FilterHasAcceleratorsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
extraSpecs := request.Spec.Data.Flavor.Data.ExtraSpecs
if _, ok := extraSpecs["accel:device_profile"]; !ok {
traceLog.Debug("no accelerators requested")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index 838afbe4b..4b07ef56c 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -41,7 +41,7 @@ type FilterHasEnoughCapacity struct {
//
// Please also note that disk space is currently not considered by this filter.
func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
// This map holds the free resources per host.
freeResourcesByHost := make(map[string]map[string]resource.Quantity)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index b1681f6b6..35367dff3 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -22,7 +22,7 @@ type FilterHasRequestedTraits struct {
// - "trait:": "forbidden" means the host must not have the specified trait.
// - "trait:": "required" means the host must have the specified trait.
func (s *FilterHasRequestedTraits) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
var requiredTraits, forbiddenTraits []string
for key, value := range request.Spec.Data.Flavor.Data.ExtraSpecs {
if !strings.HasPrefix(key, "trait:") {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index 8f21bddc4..cd57e2e4d 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -19,7 +19,7 @@ type FilterHostInstructionsStep struct {
// - spec.ignore_hosts: Filter out all hosts in this list.
// - spec.force_hosts: Include only hosts in this list.
func (s *FilterHostInstructionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.IgnoreHosts != nil {
for _, host := range *request.Spec.Data.IgnoreHosts {
delete(result.Activations, host)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index a6b6f48b2..fb42e7c19 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -21,7 +21,7 @@ func (s *FilterInstanceGroupAffinityStep) Run(
request api.ExternalSchedulerRequest,
) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
ig := request.Spec.Data.InstanceGroup
if ig == nil {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index b6a936074..e9390d9c3 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -24,7 +24,7 @@ func (s *FilterInstanceGroupAntiAffinityStep) Run(
request api.ExternalSchedulerRequest,
) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
ig := request.Spec.Data.InstanceGroup
if ig == nil {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index 52b8cb919..f31e72516 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -66,7 +66,7 @@ func (s *FilterLiveMigratableStep) Run(
request api.ExternalSchedulerRequest,
) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if !s.isLiveMigration(request) {
traceLog.Debug("not a live migration request, skipping filter")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index 57136d451..a8d386c4d 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -18,7 +18,7 @@ type FilterMaintenanceStep struct {
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
func (s *FilterMaintenanceStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
hvs := &hv1.HypervisorList{}
if err := s.Client.List(context.Background(), hvs); err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index 4bd6b8caf..bb443ef57 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -19,7 +19,7 @@ type FilterPackedVirtqueueStep struct {
// If requested, only get hosts with packed virtqueues.
func (s *FilterPackedVirtqueueStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
// We don't care about the value.
_, reqInSpecs := request.Spec.Data.Flavor.Data.ExtraSpecs["hw:virtio_packed_ring"]
_, reqInProps := request.Spec.Data.Image.Data.Properties.Data["hw_virtio_packed_ring"]
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index 17d2339d8..c9f0319fb 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -25,7 +25,7 @@ func (s *FilterRequestedDestinationStep) Run(
request api.ExternalSchedulerRequest,
) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
rd := request.Spec.Data.RequestedDestination
if rd == nil {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index 40ee90a4e..870aaa58b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -21,7 +21,7 @@ type FilterStatusConditionsStep struct {
// Check that all status conditions meet the expected values, for example,
// that the hypervisor is ready and not disabled.
func (s *FilterStatusConditionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
hvs := &hv1.HypervisorList{}
if err := s.Client.List(context.Background(), hvs); err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 4ee355f58..3d04ce8e1 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -41,7 +41,7 @@ type VMwareAntiAffinityNoisyProjectsStep struct {
// Downvote the hosts a project is currently running on if it's noisy.
func (s *VMwareAntiAffinityNoisyProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
slog.Debug("Skipping general purpose balancing step for non-VMware VM")
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index aca9380ec..14396e165 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -50,7 +50,7 @@ type VMwareAvoidLongTermContendedHostsStep struct {
// Downvote hosts that are highly contended.
func (s *VMwareAvoidLongTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
slog.Debug("Skipping general purpose balancing step for non-VMware VM")
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index 65d75fb91..34f3514e9 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -50,7 +50,7 @@ type VMwareAvoidShortTermContendedHostsStep struct {
// Downvote hosts that are highly contended.
func (s *VMwareAvoidShortTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
slog.Debug("Skipping general purpose balancing step for non-VMware VM")
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index 6973b749c..e3b54d972 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -40,7 +40,7 @@ type VMwareGeneralPurposeBalancingStep struct {
// Pack VMs on hosts based on their flavor.
func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
// Don't execute the step for non-hana flavors.
if strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
slog.Debug("Skipping general purpose balancing step for HANA flavor", "flavor", request.Spec.Data.Flavor.Data.Name)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index 7993841c6..28915a03a 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -40,7 +40,7 @@ type VMwareHanaBinpackingStep struct {
// Pack VMs on hosts based on their flavor.
func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
// Don't execute the step for non-hana flavors.
if !strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
slog.Debug("Skipping hana binpacking step for non-HANA flavor", "flavor", request.Spec.Data.Flavor.Data.Name)
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index 1e4defe8d..65ca207ab 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -32,7 +32,7 @@ type BinpackingStep struct {
}
func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.StepResult, error) {
- result := s.PrepareResult(request)
+ result := s.IncludeAllHostsFromRequest(request)
podResources := helpers.GetPodResourceRequests(request.Pod)
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index c0c12c5a0..82f3ef528 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -39,12 +39,12 @@ type pipeline[RequestType PipelineRequest] struct {
monitor PipelineMonitor
}
-type StepWrapper[RequestType PipelineRequest, SpecType v1alpha1.Step] func(
+type StepWrapper[RequestType PipelineRequest, StepType v1alpha1.Step] func(
ctx context.Context,
client client.Client,
- step SpecType,
- impl Step[RequestType, SpecType],
-) (Step[RequestType, SpecType], error)
+ step StepType,
+ impl Step[RequestType, StepType],
+) (Step[RequestType, StepType], error)
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 148224b37..caac1ebf7 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -30,13 +30,24 @@ type EmptyStepOpts struct{}
func (EmptyStepOpts) Validate() error { return nil }
// Interface for a scheduler step.
-type Step[RequestType PipelineRequest, SpecType v1alpha1.Step] interface {
+type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
// Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step SpecType) error
+ Init(ctx context.Context, client client.Client, step StepType) error
+
// Run this step of the scheduling pipeline.
- // Return a map of keys to activation values. Important: keys that are
- // not in the map are considered as filtered out.
- // Provide a traceLog that contains the global request id and should
+ //
+ // The request is immutable and modifications are stored in the result.
+ // This allows steps to be run in parallel (e.g. weighers) without passing
+ // mutable state around.
+ //
+ // All hosts that should not be filtered out must be included in the returned
+ // map of activations. I.e., filters implementing this interface should
+ // remove activations by omitting them from the returned map.
+ //
+ // Weighers implementing this interface should adjust activation
+ // values in the returned map, including all hosts from the request.
+ //
+ // A traceLog is provided that contains the global request id and should
// be used to log the step's execution.
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
@@ -49,7 +60,7 @@ type Filter[RequestType PipelineRequest] = Step[RequestType, v1alpha1.FilterSpec
// Common base for all steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts, SpecType v1alpha1.Step] struct {
+type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The activation function to use.
@@ -67,7 +78,7 @@ type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestT
type BaseFilter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.FilterSpec]
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
+func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
opts := conf.NewRawOptsBytes(step.GetOpts().Raw)
if err := s.Load(opts); err != nil {
return err
@@ -81,7 +92,8 @@ func (s *BaseStep[RequestType, Opts, SpecType]) Init(ctx context.Context, client
}
// Get a default result (no action) for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts, SpecType]) PrepareResult(request RequestType) *StepResult {
+// Use this to initialize the result before applying filtering/weighing logic.
+func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
activations := make(map[string]float64)
for _, subject := range request.GetSubjects() {
activations[subject] = s.NoEffect()
@@ -91,7 +103,7 @@ func (s *BaseStep[RequestType, Opts, SpecType]) PrepareResult(request RequestTyp
}
// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts, SpecType]) PrepareStats(request PipelineRequest, unit string) StepStatistics {
+func (s *BaseStep[RequestType, Opts, StepType]) PrepareStats(request RequestType, unit string) StepStatistics {
return StepStatistics{
Unit: unit,
Subjects: make(map[string]float64, len(request.GetSubjects())),
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 3f0f7ea2f..42601a0bb 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
+type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType, SpecType]
+ Step Step[RequestType, StepType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -44,18 +44,18 @@ type StepMonitor[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
}
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
+func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest, SpecType v1alpha1.Step](
+func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
_ context.Context,
_ client.Client,
- step SpecType,
- impl Step[RequestType, SpecType],
+ step StepType,
+ impl Step[RequestType, StepType],
m PipelineMonitor,
-) *StepMonitor[RequestType, SpecType] {
+) *StepMonitor[RequestType, StepType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
@@ -67,7 +67,7 @@ func monitorStep[RequestType PipelineRequest, SpecType v1alpha1.Step](
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
WithLabelValues(m.PipelineName, step.GetName())
}
- return &StepMonitor[RequestType, SpecType]{
+ return &StepMonitor[RequestType, StepType]{
Step: impl,
stepName: step.GetName(),
pipelineName: m.PipelineName,
@@ -80,7 +80,7 @@ func monitorStep[RequestType PipelineRequest, SpecType v1alpha1.Step](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType, SpecType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index d9f298fb8..a1940355f 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -11,15 +11,15 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest, SpecType v1alpha1.Step] struct {
- InitFunc func(ctx context.Context, client client.Client, step SpecType) error
+type mockStep[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
+ InitFunc func(ctx context.Context, client client.Client, step StepType) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType, SpecType]) Init(ctx context.Context, client client.Client, step SpecType) error {
+func (m *mockStep[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType, SpecType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockStep[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
From 7f7b5a8b5f9446a3a3fdafe2b118ea2c798f6f98 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 11:40:09 +0100
Subject: [PATCH 07/41] WIP
---
api/v1alpha1/pipeline_types.go | 89 +------
api/v1alpha1/zz_generated.deepcopy.go | 56 +----
config/crd/bases/cortex.cloud_pipelines.yaml | 123 +---------
config/crd/cortex.cloud_pipelines.yaml | 123 +---------
.../templates/crd/cortex.cloud_pipelines.yaml | 123 +---------
.../cinder/pipeline_controller_test.go | 36 +--
.../scheduling/decisions/machines/noop.go | 2 +-
.../machines/pipeline_controller_test.go | 24 +-
.../manila/pipeline_controller_test.go | 32 +--
.../nova/pipeline_controller_test.go | 60 ++---
.../pods/pipeline_controller_test.go | 24 +-
.../plugins/filters/filter_node_affinity.go | 2 +-
.../plugins/filters/filter_node_available.go | 2 +-
.../plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_noop.go | 2 +-
.../pods/plugins/filters/filter_taint.go | 2 +-
internal/scheduling/lib/pipeline.go | 13 +-
.../scheduling/lib/pipeline_controller.go | 123 ++++------
.../lib/pipeline_controller_test.go | 221 ++++--------------
internal/scheduling/lib/pipeline_test.go | 8 +-
internal/scheduling/lib/step.go | 67 ++++--
internal/scheduling/lib/step_monitor.go | 24 +-
internal/scheduling/lib/step_monitor_test.go | 6 +-
internal/scheduling/lib/step_test.go | 11 +-
internal/scheduling/lib/weigher_validation.go | 7 +-
.../scheduling/lib/weigher_validation_test.go | 6 +-
26 files changed, 284 insertions(+), 904 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 449ec0309..ee2a8285d 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -4,84 +4,16 @@
package v1alpha1
import (
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
-// Step as part of a cortex pipeline.
-type Step interface {
- // Every step must have options so the pipeline can configure it.
- GetOpts() runtime.RawExtension
- // Every step must have a name so the pipeline can identify it.
- GetName() string
- // Every step can have an optional description.
- GetDescription() string
-}
-
-// Filters remove host candidates from an initial set, leaving
-// valid candidates. Filters are run before weighers are applied, as
-// part of a filter-weigher scheduling pipeline.
-type FilterSpec struct {
- // The name of the scheduler step in the cortex implementation.
- // Must match to a step implemented by the pipeline controller.
- Name string `json:"name"`
-
- // Additional configuration for the extractor that can be used
- // +kubebuilder:validation:Optional
- Opts runtime.RawExtension `json:"opts,omitempty"`
-
- // Additional description of the step which helps understand its purpose
- // and decisions made by it.
- // +kubebuilder:validation:Optional
- Description string `json:"description,omitempty"`
-
- // Filters are not allowed to depend on knowledges, as knowledges can
- // be outdated leading to invalid filtering decisions.
-}
-
-func (f FilterSpec) GetOpts() runtime.RawExtension { return f.Opts }
-func (f FilterSpec) GetName() string { return f.Name }
-func (f FilterSpec) GetDescription() string { return f.Description }
-
-// Weighers assign weights to the remaining host candidates after filtering,
-// making some hosts more preferable than others. Weighers are run
-// after filters are applied, as part of a filter-weigher scheduling pipeline.
-type WeigherSpec struct {
- // The name of the scheduler step in the cortex implementation.
- // Must match to a step implemented by the pipeline controller.
- Name string `json:"name"`
-
- // Additional configuration for the extractor that can be used
- // +kubebuilder:validation:Optional
- Opts runtime.RawExtension `json:"opts,omitempty"`
-
- // Additional description of the step which helps understand its purpose
- // and decisions made by it.
- // +kubebuilder:validation:Optional
- Description string `json:"description,omitempty"`
-
- // Knowledges this step depends on to be ready.
- //
- // Weighers can depend on knowledges as they don't break valid placements,
- // they only make it more optimal.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
-}
-
-func (w WeigherSpec) GetOpts() runtime.RawExtension { return w.Opts }
-func (w WeigherSpec) GetName() string { return w.Name }
-func (w WeigherSpec) GetDescription() string { return w.Description }
-
-// Detectors find candidates for descheduling (migration off current host).
-// These detectors are run after weighers are applied, as part of a
-// descheduler scheduling pipeline.
-type DetectorSpec struct {
+type StepSpec struct {
// The name of the scheduler step in the cortex implementation.
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
- // Additional configuration for the extractor that can be used
+ // Additional configuration for the step.
// +kubebuilder:validation:Optional
Opts runtime.RawExtension `json:"opts,omitempty"`
@@ -89,19 +21,8 @@ type DetectorSpec struct {
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
-
- // Knowledges this step depends on to be ready.
- //
- // Detectors can depend on knowledges as they don't ensure valid placements
- // and therefore are not on the critical path.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
-func (d DetectorSpec) GetOpts() runtime.RawExtension { return d.Opts }
-func (d DetectorSpec) GetName() string { return d.Name }
-func (d DetectorSpec) GetDescription() string { return d.Description }
-
type PipelineType string
const (
@@ -144,14 +65,14 @@ type PipelineSpec struct {
// Filters remove host candidates from an initial set, leaving
// valid candidates. Filters are run before weighers are applied.
// +kubebuilder:validation:Optional
- Filters []FilterSpec `json:"filters,omitempty"`
+ Filters []StepSpec `json:"filters,omitempty"`
// Ordered list of weighers to apply in a scheduling pipeline.
//
// This attribute is set only if the pipeline type is filter-weigher.
// These weighers are run after filters are applied.
// +kubebuilder:validation:Optional
- Weighers []WeigherSpec `json:"weighers,omitempty"`
+ Weighers []StepSpec `json:"weighers,omitempty"`
// Ordered list of detectors to apply in a descheduling pipeline.
//
@@ -159,7 +80,7 @@ type PipelineSpec struct {
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied.
// +kubebuilder:validation:Optional
- Detectors []DetectorSpec `json:"detectors,omitempty"`
+ Detectors []StepSpec `json:"detectors,omitempty"`
}
const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 785455a08..ae02b8da2 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -425,43 +425,6 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
- *out = *in
- in.Opts.DeepCopyInto(&out.Opts)
- if in.Knowledges != nil {
- in, out := &in.Knowledges, &out.Knowledges
- *out = make([]v1.ObjectReference, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec.
-func (in *DetectorSpec) DeepCopy() *DetectorSpec {
- if in == nil {
- return nil
- }
- out := new(DetectorSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
- *out = *in
- in.Opts.DeepCopyInto(&out.Opts)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
-func (in *FilterSpec) DeepCopy() *FilterSpec {
- if in == nil {
- return nil
- }
- out := new(FilterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityDatasource) DeepCopyInto(out *IdentityDatasource) {
*out = *in
@@ -879,21 +842,21 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
- *out = make([]FilterSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Weighers != nil {
in, out := &in.Weighers, &out.Weighers
- *out = make([]WeigherSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Detectors != nil {
in, out := &in.Detectors, &out.Detectors
- *out = make([]DetectorSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1135,22 +1098,17 @@ func (in *StepResult) DeepCopy() *StepResult {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
+func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
in.Opts.DeepCopyInto(&out.Opts)
- if in.Knowledges != nil {
- in, out := &in.Knowledges, &out.Knowledges
- *out = make([]v1.ObjectReference, len(*in))
- copy(*out, *in)
- }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherSpec.
-func (in *WeigherSpec) DeepCopy() *WeigherSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
+func (in *StepSpec) DeepCopy() *StepSpec {
if in == nil {
return nil
}
- out := new(WeigherSpec)
+ out := new(StepSpec)
in.DeepCopyInto(out)
return out
}
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 794acf0b1..38c5ebc48 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -73,75 +73,19 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -156,10 +100,6 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -172,8 +112,7 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -205,75 +144,19 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 794acf0b1..38c5ebc48 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -73,75 +73,19 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -156,10 +100,6 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -172,8 +112,7 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -205,75 +144,19 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 679bab0fb..01aa46f5c 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -79,75 +79,19 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -162,10 +106,6 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -178,8 +118,7 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -211,75 +150,19 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 2b7cda1bf..7121f5166 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -175,8 +175,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
})
if err != nil {
@@ -284,8 +284,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -318,8 +318,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -372,8 +372,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -476,24 +476,24 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "test-plugin",
},
},
- weighers: []v1alpha1.WeigherSpec{
+ weighers: []v1alpha1.StepSpec{
{
Name: "test-plugin",
},
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index 6dfa7911b..3b0104aa6 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -15,7 +15,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 157b8ac13..c218486bb 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -211,26 +211,26 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{Name: "noop"},
},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{Name: "unsupported"},
},
expectError: true,
@@ -315,8 +315,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -349,8 +349,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -396,8 +396,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index d280f0e05..ea8e20c14 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -279,8 +279,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -313,8 +313,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -367,8 +367,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -471,19 +471,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "supported netapp step",
- weighers: []v1alpha1.WeigherSpec{
+ weighers: []v1alpha1.StepSpec{
{
Name: "netapp_cpu_usage_balancing",
Opts: runtime.RawExtension{
@@ -495,7 +495,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported-plugin",
},
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index b02face53..69ead2c8c 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -92,8 +92,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -121,8 +121,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -173,8 +173,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -264,19 +264,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "supported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
},
@@ -285,7 +285,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported-plugin",
},
@@ -294,7 +294,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with scoping options",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -306,7 +306,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with invalid scoping options",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -418,8 +418,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -430,8 +430,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -466,8 +466,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -478,8 +478,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -538,8 +538,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -550,8 +550,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -588,8 +588,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -626,8 +626,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 4d93a1720..4e6a6f249 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -186,19 +186,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "noop",
},
@@ -207,7 +207,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported",
},
@@ -292,8 +292,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -326,8 +326,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -373,8 +373,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index acacc6ea6..265bffa24 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -19,7 +19,7 @@ type NodeAffinityFilter struct {
Alias string
}
-func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index c668e5f0a..45ae98067 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -18,7 +18,7 @@ type NodeAvailableFilter struct {
Alias string
}
-func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 70e897b6a..44d185580 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -19,7 +19,7 @@ type NodeCapacityFilter struct {
Alias string
}
-func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 08fbf1cd4..3cd328a50 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -18,7 +18,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 697c41466..82135b161 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -18,7 +18,7 @@ type TaintFilter struct {
Alias string
}
-func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 82f3ef528..37a1ad3f1 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -27,7 +27,7 @@ type pipeline[RequestType PipelineRequest] struct {
// The activation function to use when combining the
// results of the scheduler steps.
ActivationFunction
- // The order in which filters are applied, by their step name.
+ // The order in which filters are executed, by their step name.
filtersOrder []string
// The filters by their name.
filters map[string]Filter[RequestType]
@@ -39,22 +39,15 @@ type pipeline[RequestType PipelineRequest] struct {
monitor PipelineMonitor
}
-type StepWrapper[RequestType PipelineRequest, StepType v1alpha1.Step] func(
- ctx context.Context,
- client client.Client,
- step StepType,
- impl Step[RequestType, StepType],
-) (Step[RequestType, StepType], error)
-
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
supportedFilters map[string]func() Filter[RequestType],
- confedFilters []v1alpha1.FilterSpec,
+ confedFilters []v1alpha1.StepSpec,
supportedWeighers map[string]func() Weigher[RequestType],
- confedWeighers []v1alpha1.WeigherSpec,
+ confedWeighers []v1alpha1.StepSpec,
monitor PipelineMonitor,
) (Pipeline[RequestType], error) {
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 50e3af497..d8028313c 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -8,7 +8,6 @@ import (
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
@@ -27,7 +26,27 @@ type PipelineInitializer[PipelineType any] interface {
// This method is delegated to the parent controller, when a pipeline needs
// to be newly initialized or re-initialized to update it in the pipeline
// map.
- InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (PipelineType, error)
+ //
+ // Ready and total indicate how many steps are ready out of the total
+ // configured steps. Sometimes, steps may be unready but this does not
+ // prevent the pipeline from being created, e.g., when weighers depend
+ // on knowledges that are not yet ready.
+ //
+ // If there was an error that blocks us from creating the pipeline, return
+ // a non-nil error.
+ InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (
+ pipeline PipelineType,
+ ready int,
+ total int,
+ err error,
+ )
+
+ // Collect knowledge dependencies for the given pipeline.
+ //
+ // This is used to determine which pipelines depend on which knowledges
+ // when a knowledge changes.
+ CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string
+
// Get the accepted pipeline type for this controller.
//
// This is used to filter pipelines when listing existing pipelines on
@@ -89,28 +108,10 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
log := ctrl.LoggerFrom(ctx)
old := obj.DeepCopy()
- // Check if all steps are ready. If not, check if the step is mandatory.
- obj.Status.TotalSteps = len(obj.Spec.Filters) + len(obj.Spec.Weighers) + len(obj.Spec.Detectors)
- obj.Status.ReadySteps = 0
- for range obj.Spec.Filters { // Could use len() directly but want to keep the pattern.
- // If needed, check if this filter needs any dependencies. For now,
- // as filters do not depend on knowledges, we skip this.
- obj.Status.ReadySteps++
- }
- for _, detector := range obj.Spec.Detectors {
- if err := c.checkAllKnowledgesReady(ctx, detector.Knowledges); err == nil {
- obj.Status.ReadySteps++
- }
- }
- for _, weigher := range obj.Spec.Weighers {
- if err := c.checkAllKnowledgesReady(ctx, weigher.Knowledges); err == nil {
- obj.Status.ReadySteps++
- }
- }
- obj.Status.StepsReadyFrac = fmt.Sprintf("%d/%d", obj.Status.ReadySteps, obj.Status.TotalSteps)
-
var err error
- c.Pipelines[obj.Name], err = c.Initializer.InitPipeline(ctx, *obj)
+ c.Pipelines[obj.Name], obj.Status.ReadySteps, obj.Status.TotalSteps, err = c.
+ Initializer.InitPipeline(ctx, *obj)
+ obj.Status.StepsReadyFrac = fmt.Sprintf("%d/%d", obj.Status.ReadySteps, obj.Status.TotalSteps)
c.PipelineConfigs[obj.Name] = *obj
if err != nil {
log.Error(err, "failed to create pipeline", "pipelineName", obj.Name)
@@ -184,45 +185,6 @@ func (c *BasePipelineController[PipelineType]) HandlePipelineDeleted(
delete(c.PipelineConfigs, pipelineConf.Name)
}
-// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (c *BasePipelineController[PipelineType]) checkAllKnowledgesReady(
- ctx context.Context,
- objects []corev1.ObjectReference,
-) error {
-
- log := ctrl.LoggerFrom(ctx)
- // Check the status of all knowledges depending on this step.
- readyKnowledges := 0
- totalKnowledges := len(objects)
- for _, objRef := range objects {
- knowledge := &v1alpha1.Knowledge{}
- if err := c.Get(ctx, client.ObjectKey{
- Name: objRef.Name,
- Namespace: objRef.Namespace,
- }, knowledge); err != nil {
- log.Error(err, "failed to get knowledge depending on step", "knowledgeName", objRef.Name)
- continue
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- log.Info("knowledge not ready due to error condition", "knowledgeName", objRef.Name)
- continue
- }
- if knowledge.Status.RawLength == 0 {
- log.Info("knowledge not ready, no data available", "knowledgeName", objRef.Name)
- continue
- }
- readyKnowledges++
- }
- if readyKnowledges != totalKnowledges {
- return fmt.Errorf(
- "%d/%d knowledges ready",
- readyKnowledges, totalKnowledges,
- )
- }
- return nil
-}
-
// Handle a knowledge creation, update, or delete event from watching knowledge resources.
func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
ctx context.Context,
@@ -234,37 +196,32 @@ func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
return
}
log := ctrl.LoggerFrom(ctx)
- log.Info("knowledge changed, re-evaluating dependent pipelines", "knowledgeName", obj.Name)
- // Find all pipelines depending on this knowledge and re-evaluate them.
+ log.Info("knowledge changed readiness/availability, re-evaluating pipelines", "knowledgeName", obj.Name)
var pipelines v1alpha1.PipelineList
if err := c.List(ctx, &pipelines); err != nil {
log.Error(err, "failed to list pipelines for knowledge", "knowledgeName", obj.Name)
return
}
for _, pipeline := range pipelines.Items {
- needsUpdate := false
- // For filter-weigher pipelines, only weighers may depend on knowledges.
- for _, step := range pipeline.Spec.Weighers {
- for _, knowledgeRef := range step.Knowledges {
- if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
- needsUpdate = true
- break
- }
- }
+ if pipeline.Spec.SchedulingDomain != c.SchedulingDomain {
+ continue
+ }
+ if pipeline.Spec.Type != c.Initializer.PipelineType() {
+ continue
}
- // Check descheduler pipelines where detectors may depend on knowledges.
- for _, step := range pipeline.Spec.Detectors {
- for _, knowledgeRef := range step.Knowledges {
- if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
- needsUpdate = true
- break
- }
+ knowledgeDeps := c.Initializer.CollectKnowledgeDependencies(pipeline)
+ found := false
+ for _, knowledgeName := range knowledgeDeps {
+ if knowledgeName == obj.Name {
+ found = true
+ break
}
}
- if needsUpdate {
- log.Info("re-evaluating pipeline due to knowledge change", "pipelineName", pipeline.Name)
- c.handlePipelineChange(ctx, &pipeline, queue)
+ if !found {
+ continue
}
+ log.Info("re-evaluating pipeline due to knowledge change", "pipelineName", pipeline.Name, "knowledgeName", obj.Name)
+ c.handlePipelineChange(ctx, &pipeline, queue)
}
}
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 7a85999b6..76dd08e53 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -25,15 +25,23 @@ type mockPipeline struct {
// Mock PipelineInitializer for testing
type mockPipelineInitializer struct {
- pipelineType v1alpha1.PipelineType
- initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error)
+ pipelineType v1alpha1.PipelineType
+ initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error)
+ collectKnowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
}
-func (m *mockPipelineInitializer) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
+func (m *mockPipelineInitializer) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error) {
if m.initPipelineFunc != nil {
return m.initPipelineFunc(ctx, p)
}
- return mockPipeline{name: p.Name}, nil
+ return mockPipeline{name: p.Name}, 0, 0, nil
+}
+
+func (m *mockPipelineInitializer) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
+ if m.collectKnowledgeDependenciesFunc != nil {
+ return m.collectKnowledgeDependenciesFunc(p)
+ }
+ return nil
}
func (m *mockPipelineInitializer) PipelineType() v1alpha1.PipelineType {
@@ -72,8 +80,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
},
@@ -92,8 +100,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -103,8 +111,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -114,8 +122,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeDescheduler,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -125,8 +133,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
},
@@ -202,12 +210,12 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{
+ Filters: []v1alpha1.StepSpec{
{
Name: "test-filter",
},
},
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
},
@@ -241,7 +249,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -265,7 +273,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -283,7 +291,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -311,8 +319,8 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
}
if tt.initPipelineError {
- initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
- return mockPipeline{}, context.Canceled
+ initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error) {
+ return mockPipeline{}, 0, 0, context.Canceled
}
}
@@ -361,8 +369,8 @@ func TestBasePipelineController_HandlePipelineCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
}
@@ -406,8 +414,8 @@ func TestBasePipelineController_HandlePipelineUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
}
@@ -481,157 +489,6 @@ func TestBasePipelineController_HandlePipelineDeleted(t *testing.T) {
}
}
-func TestBasePipelineController_checkAllKnowledgesReady(t *testing.T) {
- scheme := runtime.NewScheme()
- if err := v1alpha1.AddToScheme(scheme); err != nil {
- t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
- }
-
- tests := []struct {
- name string
- knowledges []v1alpha1.Knowledge
- expectError bool
- }{
- {
- name: "no knowledges",
- knowledges: []v1alpha1.Knowledge{},
- expectError: false,
- },
- {
- name: "ready knowledge",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- },
- expectError: false,
- },
- {
- name: "knowledge in error state",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "error-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- Conditions: []metav1.Condition{
- {
- Type: v1alpha1.KnowledgeConditionReady,
- Status: metav1.ConditionFalse,
- },
- },
- },
- },
- },
- expectError: true,
- },
- {
- name: "knowledge with no data",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "no-data-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 0,
- },
- },
- },
- expectError: true,
- },
- {
- name: "multiple knowledges, all ready",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "knowledge-1",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "knowledge-2",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 5,
- },
- },
- },
- expectError: false,
- },
- {
- name: "multiple knowledges, some not ready",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "not-ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 0,
- },
- },
- },
- expectError: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- objects := make([]client.Object, len(tt.knowledges))
- for i := range tt.knowledges {
- objects[i] = &tt.knowledges[i]
- }
-
- fakeClient := fake.NewClientBuilder().
- WithScheme(scheme).
- WithObjects(objects...).
- Build()
-
- controller := &BasePipelineController[mockPipeline]{
- Client: fakeClient,
- }
-
- objectReferences := make([]corev1.ObjectReference, len(tt.knowledges))
- for i, k := range tt.knowledges {
- objectReferences[i] = corev1.ObjectReference{
- Name: k.Name,
- Namespace: k.Namespace,
- }
- }
- err := controller.checkAllKnowledgesReady(context.Background(), objectReferences)
-
- if tt.expectError && err == nil {
- t.Error("Expected error but got none")
- }
- if !tt.expectError && err != nil {
- t.Errorf("Expected no error but got: %v", err)
- }
- })
- }
-}
-
func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
@@ -667,7 +524,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -684,7 +541,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -717,7 +574,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -794,7 +651,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -945,7 +802,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -1013,7 +870,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index dcb1f4e02..95be1919e 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -18,7 +18,7 @@ type mockFilter struct {
name string
}
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
@@ -36,7 +36,7 @@ type mockWeigher struct {
name string
}
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
@@ -49,6 +49,10 @@ func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*
}, nil
}
+func (m *mockWeigher) RequiredKnowledges() []string {
+ return []string{}
+}
+
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &pipeline[mockPipelineRequest]{
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index caac1ebf7..d7cd4e9e9 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -10,6 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/pkg/conf"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -30,9 +31,9 @@ type EmptyStepOpts struct{}
func (EmptyStepOpts) Validate() error { return nil }
// Interface for a scheduler step.
-type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
+type Step[RequestType PipelineRequest] interface {
// Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step StepType) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
// Run this step of the scheduling pipeline.
//
@@ -52,15 +53,23 @@ type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-// Step that acts as a weigher in the scheduling pipeline.
-type Weigher[RequestType PipelineRequest] = Step[RequestType, v1alpha1.WeigherSpec]
-
// Step that acts as a filter in the scheduling pipeline.
-type Filter[RequestType PipelineRequest] = Step[RequestType, v1alpha1.FilterSpec]
+type Filter[RequestType PipelineRequest] = Step[RequestType]
+
+// Step that acts as a weigher in the scheduling pipeline.
+type Weigher[RequestType PipelineRequest] interface {
+ Step[RequestType]
+
+ // Weighers can define knowledges they depend on, which must be
+ // ready before the weigher can execute properly.
+ // The returned slice contains the names of the knowledges which
+ // can be found as kubernetes custom resources of kind Knowledge.
+ RequiredKnowledges() []string
+}
// Common base for all steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step] struct {
+type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The activation function to use.
@@ -69,17 +78,24 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step
Client client.Client
}
-// Common base implementation of a weigher step.
+// Common base implementation of a filter step.
// Functionally identical to BaseStep, but used for clarity.
-type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.WeigherSpec]
+type BaseFilter[RequestType PipelineRequest, Opts StepOpts] struct {
+ BaseStep[RequestType, Opts]
+}
-// Common base implementation of a filter step.
+// Common base implementation of a weigher step.
// Functionally identical to BaseStep, but used for clarity.
-type BaseFilter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.FilterSpec]
+type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] struct {
+ BaseStep[RequestType, Opts]
+}
+
+// Override to specify required knowledges for this weigher.
+func (s *BaseWeigher[RequestType, Opts]) RequiredKnowledges() []string { return []string{} }
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
- opts := conf.NewRawOptsBytes(step.GetOpts().Raw)
+func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
}
@@ -91,9 +107,30 @@ func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client
return nil
}
+// Weighers need to check if all dependency knowledges are available.
+func (s *BaseWeigher[RequestType, Opts]) Init(ctx context.Context, c client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, c, step); err != nil {
+ return err
+ }
+ for _, knowledgeName := range s.RequiredKnowledges() {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := c.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
+ return err
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return errors.New("knowledge not ready: " + knowledgeName)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return errors.New("knowledge has no data: " + knowledgeName)
+ }
+ }
+ return nil
+}
+
// Get a default result (no action) for the input weight keys given in the request.
// Use this to initialize the result before applying filtering/weighing logic.
-func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
+func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
activations := make(map[string]float64)
for _, subject := range request.GetSubjects() {
activations[subject] = s.NoEffect()
@@ -103,7 +140,7 @@ func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(reque
}
// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts, StepType]) PrepareStats(request RequestType, unit string) StepStatistics {
+func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
return StepStatistics{
Unit: unit,
Subjects: make(map[string]float64, len(request.GetSubjects())),
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 42601a0bb..2e361c1b3 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
+type StepMonitor[RequestType PipelineRequest] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType, StepType]
+ Step Step[RequestType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -44,32 +44,32 @@ type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
}
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
+func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
+func monitorStep[RequestType PipelineRequest](
_ context.Context,
_ client.Client,
- step StepType,
- impl Step[RequestType, StepType],
+ step v1alpha1.StepSpec,
+ impl Step[RequestType],
m PipelineMonitor,
-) *StepMonitor[RequestType, StepType] {
+) *StepMonitor[RequestType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, step.GetName())
+ WithLabelValues(m.PipelineName, step.Name)
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, step.GetName())
+ WithLabelValues(m.PipelineName, step.Name)
}
- return &StepMonitor[RequestType, StepType]{
+ return &StepMonitor[RequestType]{
Step: impl,
- stepName: step.GetName(),
+ stepName: step.Name,
pipelineName: m.PipelineName,
runTimer: runTimer,
stepSubjectWeight: m.stepSubjectWeight,
@@ -80,7 +80,7 @@ func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index fea4e04e7..c248ec576 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -7,8 +7,6 @@ import (
"log/slog"
"os"
"testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
type mockObserver struct {
@@ -23,9 +21,9 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ monitor := &StepMonitor[mockPipelineRequest]{
stepName: "mock_step",
- Step: &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ Step: &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index a1940355f..8d826bc7b 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -11,17 +11,20 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
- InitFunc func(ctx context.Context, client client.Client, step StepType) error
+type mockStep[RequestType PipelineRequest] struct {
+ InitFunc func(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
+func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
+func (m *mockStep[RequestType]) RequiredKnowledges() []string {
+ return []string{}
+}
type MockOptions struct {
Option1 string `json:"option1"`
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index a86e19ec5..e2a594de8 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -19,7 +19,7 @@ type WeigherValidator[RequestType PipelineRequest] struct {
}
// Initialize the wrapped weigher with the database and options.
-func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
slog.Info("scheduler: init validation for step", "name", step.Name)
return s.Weigher.Init(ctx, client, step)
}
@@ -52,3 +52,8 @@ func (s *WeigherValidator[RequestType]) Run(traceLog *slog.Logger, request Reque
}
return result, nil
}
+
+// Return the required knowledges for this weigher.
+func (s *WeigherValidator[RequestType]) RequiredKnowledges() []string {
+ return s.Weigher.RequiredKnowledges()
+}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index b97796c3f..aa6cba851 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -7,12 +7,10 @@ import (
"log/slog"
"reflect"
"testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ mockStep := &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
@@ -47,7 +45,7 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ mockStep := &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
From 1d424d752c0ab95e4b2eec5c34c636d6c7d07e65 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 14:35:26 +0100
Subject: [PATCH 08/41] Went in the wrong direction
---
.../decisions/nova/pipeline_controller.go | 20 ++++-
.../nova/pipeline_controller_test.go | 6 +-
.../filters/filter_live_migratable_test.go | 12 ++-
.../filter_requested_destination_test.go | 8 +-
.../decisions/pods/pipeline_controller.go | 2 +-
.../scheduling/descheduling/nova/monitor.go | 8 +-
.../descheduling/nova/monitor_test.go | 20 +++--
.../scheduling/descheduling/nova/pipeline.go | 28 +++++--
.../descheduling/nova/pipeline_controller.go | 31 ++++++-
.../nova/pipeline_controller_test.go | 15 ++--
.../descheduling/nova/pipeline_test.go | 18 +++--
.../descheduling/nova/plugins/base.go | 6 +-
.../descheduling/nova/plugins/base_test.go | 2 +-
internal/scheduling/descheduling/nova/step.go | 7 +-
internal/scheduling/lib/pipeline.go | 44 +++++++---
.../lib/pipeline_controller_test.go | 81 ++++++++++---------
internal/scheduling/lib/step_monitor.go | 50 ++++++++++--
internal/scheduling/lib/step_monitor_test.go | 2 +-
18 files changed, 257 insertions(+), 103 deletions(-)
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go
index dfbd7a249..8abe91197 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller.go
@@ -151,7 +151,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
+) (lib.Pipeline[api.ExternalSchedulerRequest], int, int, error) {
return lib.NewFilterWeigherPipeline(
ctx, c.Client, p.Name,
@@ -161,6 +161,24 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
+func (c *DecisionPipelineController) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
+ dependencies := make(map[string]struct{})
+ for _, weigherConf := range p.Spec.Weighers {
+ weigher, ok := supportedWeighers[weigherConf.Name]
+ if !ok {
+ continue
+ }
+ for _, knowledgeName := range weigher().RequiredKnowledges() {
+ dependencies[knowledgeName] = struct{}{}
+ }
+ }
+ result := make([]string, 0, len(dependencies))
+ for knowledgeName := range dependencies {
+ result = append(result, knowledgeName)
+ }
+ return result
+}
+
func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainNova
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 69ead2c8c..fbcd1ad48 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -207,7 +207,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ pipeline, _, _, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
@@ -320,7 +320,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ pipeline, _, _, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -672,7 +672,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
// Setup runtime pipeline if needed
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
+ pipeline, _, _, err := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index edb33c5c0..b987e9c15 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -728,7 +728,9 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -813,7 +815,9 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -857,7 +861,9 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index 3ba008214..5952e4c3f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -495,7 +495,9 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
step := &FilterRequestedDestinationStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -576,7 +578,9 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
step := &FilterRequestedDestinationStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index fbb8f84d0..ca386e15c 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -197,7 +197,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[pods.PodPipelineRequest], error) {
+) (lib.Pipeline[pods.PodPipelineRequest], int, int, error) {
return lib.NewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index 6fd248321..f9cf20b94 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -83,7 +83,7 @@ type StepMonitor struct {
}
// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
+func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -101,10 +101,14 @@ func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMon
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return m.step.Init(ctx, client, step)
}
+func (m StepMonitor) RequiredKnowledges() []string {
+ return m.step.RequiredKnowledges()
+}
+
// Run the step and measure its execution time.
func (m StepMonitor) Run() ([]plugins.Decision, error) {
if m.runTimer != nil {
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index ed7416848..a61f0f404 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -80,7 +80,7 @@ type mockMonitorStep struct {
runCalled bool
}
-func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
m.initCalled = true
return m.initError
}
@@ -90,6 +90,10 @@ func (m *mockMonitorStep) Run() ([]plugins.Decision, error) {
return m.decisions, m.runError
}
+func (m *mockMonitorStep) RequiredKnowledges() []string {
+ return []string{}
+}
+
func TestMonitorStep(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{
@@ -97,7 +101,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +121,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +143,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +163,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +193,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +218,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +246,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index 08c26213d..6c782b1cd 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -14,6 +14,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/prometheus/client_golang/prometheus"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -33,27 +34,44 @@ type Pipeline struct {
func (p *Pipeline) Init(
ctx context.Context,
- confedSteps []v1alpha1.DetectorSpec,
+ confedSteps []v1alpha1.StepSpec,
supportedSteps map[string]Step,
-) error {
+) (ready int, total int, err error) {
p.order = []string{}
// Load all steps from the configuration.
p.steps = make(map[string]Step, len(confedSteps))
+ total = len(confedSteps)
+ ready = 0
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
- return errors.New("descheduler: unsupported step: " + stepConf.Name)
+ return ready, total, errors.New("descheduler: unsupported step: " + stepConf.Name)
+ }
+ // Check if all knowledges this step requires are available.
+ for _, knowledgeName := range step.RequiredKnowledges() {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := p.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
+ return ready, total, err
+ }
+ // Check if the knowledge's Ready condition is explicitly false.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return ready, total, errors.New("knowledge not ready: " + knowledgeName)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return ready, total, errors.New("knowledge has no data: " + knowledgeName)
+ }
}
step = monitorStep(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
- return err
+ return ready, total, err
}
p.steps[stepConf.Name] = step
p.order = append(p.order, stepConf.Name)
slog.Info("descheduler: added step", "name", stepConf.Name)
+ ready++
}
- return nil
+ return ready, total, nil
}
// Execute the descheduler steps in parallel and collect the decisions made by
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index 0cae5eaff..ba4e84c5c 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -46,14 +46,19 @@ func (c *DeschedulingsPipelineController) PipelineType() v1alpha1.PipelineType {
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (*Pipeline, error) {
- pipeline := &Pipeline{
+func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (
+ pipeline *Pipeline,
+ ready int,
+ total int,
+ err error,
+) {
+ pipeline = &Pipeline{
Client: c.Client,
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- err := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
- return pipeline, err
+ ready, total, err = pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
+ return pipeline, ready, total, err
}
func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx context.Context) {
@@ -78,6 +83,24 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
}
}
+func (c *DeschedulingsPipelineController) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
+ dependencies := make(map[string]struct{})
+ for _, stepConf := range p.Spec.Detectors {
+ step, ok := supportedSteps[stepConf.Name]
+ if !ok {
+ continue
+ }
+ for _, knowledgeName := range step.RequiredKnowledges() {
+ dependencies[knowledgeName] = struct{}{}
+ }
+ }
+ result := make([]string, 0, len(dependencies))
+ for knowledgeName := range dependencies {
+ result = append(result, knowledgeName)
+ }
+ return result
+}
+
func (c *DeschedulingsPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// This controller does not reconcile any resources directly.
return ctrl.Result{}, nil
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 86a254d93..83b0ce751 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -33,20 +33,23 @@ type mockControllerStep struct{}
func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
return nil, nil
}
-func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
+func (m *mockControllerStep) RequiredKnowledges() []string {
+ return []string{}
+}
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.DetectorSpec
+ steps []v1alpha1.StepSpec
expectError bool
expectedError string
}{
{
name: "successful pipeline initialization",
- steps: []v1alpha1.DetectorSpec{
+ steps: []v1alpha1.StepSpec{
{
Name: "mock-step",
},
@@ -55,7 +58,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- steps: []v1alpha1.DetectorSpec{
+ steps: []v1alpha1.StepSpec{
{
Name: "unsupported",
},
@@ -65,7 +68,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "empty steps",
- steps: []v1alpha1.DetectorSpec{},
+ steps: []v1alpha1.StepSpec{},
expectError: false,
},
}
@@ -81,7 +84,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
CycleDetector: controller.CycleDetector,
Monitor: controller.Monitor,
}
- err := pipeline.Init(t.Context(), tt.steps, map[string]Step{
+ _, _, err := pipeline.Init(t.Context(), tt.steps, map[string]Step{
"mock-step": &mockControllerStep{},
})
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index d006f2b8f..55bedbb4e 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -30,7 +30,7 @@ func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
return m.decisions, nil
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
if m.initError != nil {
return m.initError
}
@@ -38,11 +38,15 @@ func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step
return nil
}
+func (m *mockPipelineStep) RequiredKnowledges() []string {
+ return []string{}
+}
+
func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
supportedSteps map[string]Step
- confedSteps []v1alpha1.DetectorSpec
+ confedSteps []v1alpha1.StepSpec
expectedSteps int
expectedError bool
}{
@@ -51,7 +55,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "test-step",
}},
expectedSteps: 1,
@@ -61,7 +65,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "unsupported-step",
}},
expectedError: true,
@@ -71,7 +75,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "failing-step",
}},
expectedError: true,
@@ -82,7 +86,7 @@ func TestPipeline_Init(t *testing.T) {
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{
+ confedSteps: []v1alpha1.StepSpec{
{
Name: "step1",
},
@@ -98,7 +102,7 @@ func TestPipeline_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
pipeline := &Pipeline{}
- err := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
+ _, _, err := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
if tt.expectedError {
if err == nil {
t.Fatalf("expected error during initialization, got none")
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index c1b6ea902..af97f6609 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -21,7 +21,7 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
@@ -31,6 +31,10 @@ func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1
return nil
}
+func (s *Detector[Opts]) RequiredKnowledges() []string {
+ return []string{}
+}
+
type Decision struct {
// Get the VM ID for which this decision applies.
VMID string
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index a0f581c0a..524c69547 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -23,7 +23,7 @@ func (o MockOptions) Validate() error {
func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
- err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
+ err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
Opts: runtime.RawExtension{Raw: []byte(`{
"option1": "value1",
"option2": 2
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/step.go
index 552edf87b..838ef29c0 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/step.go
@@ -21,5 +21,10 @@ type Step interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+ // Descheduler steps can declare knowledges they depend on; these must be
+ // ready before the step can execute properly.
+ // The returned slice contains the names of the knowledges, which
+ // exist as Kubernetes custom resources of kind Knowledge.
+ RequiredKnowledges() []string
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 37a1ad3f1..4dcda80c6 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -14,6 +14,7 @@ import (
"sync"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -42,24 +43,27 @@ type pipeline[RequestType PipelineRequest] struct {
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
- client client.Client,
+ cl client.Client,
name string,
supportedFilters map[string]func() Filter[RequestType],
confedFilters []v1alpha1.StepSpec,
supportedWeighers map[string]func() Weigher[RequestType],
confedWeighers []v1alpha1.StepSpec,
monitor PipelineMonitor,
-) (Pipeline[RequestType], error) {
+) (p Pipeline[RequestType], ready int, total int, err error) {
pipelineMonitor := monitor.SubPipeline(name)
// Ensure there are no overlaps between filter and weigher names.
for filterName := range supportedFilters {
if _, ok := supportedWeighers[filterName]; ok {
- return nil, errors.New("step name overlap between filters and weighers: " + filterName)
+ return nil, ready, total, errors.New("step name overlap between filters and weighers: " + filterName)
}
}
+ total = len(confedFilters) + len(confedWeighers)
+ ready = 0
+
// Load all filters from the configuration.
filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
filtersOrder := []string{}
@@ -68,16 +72,17 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "filters", maps.Keys(supportedFilters))
makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return nil, errors.New("unsupported filter name: " + filterConfig.Name)
+ return nil, ready, total, errors.New("unsupported filter name: " + filterConfig.Name)
}
filter := makeFilter()
- filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
- if err := filter.Init(ctx, client, filterConfig); err != nil {
- return nil, errors.New("failed to initialize filter: " + err.Error())
+ filter = monitorFilter(ctx, cl, filterConfig, filter, pipelineMonitor)
+ if err := filter.Init(ctx, cl, filterConfig); err != nil {
+ return nil, ready, total, errors.New("failed to initialize filter: " + err.Error())
}
filtersByName[filterConfig.Name] = filter
filtersOrder = append(filtersOrder, filterConfig.Name)
slog.Info("scheduler: added filter", "name", filterConfig.Name)
+ ready++
}
// Load all weighers from the configuration.
@@ -88,17 +93,32 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
makeWeigher, ok := supportedWeighers[weigherConfig.Name]
if !ok {
- return nil, errors.New("unsupported weigher name: " + weigherConfig.Name)
+ return nil, ready, total, errors.New("unsupported weigher name: " + weigherConfig.Name)
}
weigher := makeWeigher()
+ // Check if all knowledges this step requires are available.
+ for _, knowledgeName := range weigher.RequiredKnowledges() {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := cl.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
+ return nil, ready, total, err
+ }
+ // Check if the knowledge's Ready condition is explicitly false.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return nil, ready, total, errors.New("knowledge not ready: " + knowledgeName)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return nil, ready, total, errors.New("knowledge has no data: " + knowledgeName)
+ }
+ }
weigher = validateWeigher(weigher)
- weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
- if err := weigher.Init(ctx, client, weigherConfig); err != nil {
- return nil, errors.New("failed to initialize pipeline step: " + err.Error())
+ weigher = monitorWeigher(ctx, cl, weigherConfig, weigher, pipelineMonitor)
+ if err := weigher.Init(ctx, cl, weigherConfig); err != nil {
+ return nil, ready, total, errors.New("failed to initialize pipeline step: " + err.Error())
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
+ ready++
}
return &pipeline[RequestType]{
@@ -107,7 +127,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
weighersOrder: weighersOrder,
weighers: weighersByName,
monitor: pipelineMonitor,
- }, nil
+ }, ready, total, nil
}
// Execute filters and collect their activations by step name.
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 76dd08e53..51561ff1c 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -7,7 +7,6 @@ import (
"context"
"testing"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -193,13 +192,14 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
}
tests := []struct {
- name string
- pipeline *v1alpha1.Pipeline
- knowledges []v1alpha1.Knowledge
- schedulingDomain v1alpha1.SchedulingDomain
- initPipelineError bool
- expectReady bool
- expectInMap bool
+ name string
+ pipeline *v1alpha1.Pipeline
+ knowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
+ knowledges []v1alpha1.Knowledge
+ schedulingDomain v1alpha1.SchedulingDomain
+ initPipelineError bool
+ expectReady bool
+ expectInMap bool
}{
{
name: "pipeline with all steps ready",
@@ -252,13 +252,13 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "missing-knowledge", Namespace: "default"},
- },
},
},
},
},
+ knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
+ return []string{"missing-knowledge"}
+ },
knowledges: []v1alpha1.Knowledge{},
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReady: true,
@@ -323,6 +323,9 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
return mockPipeline{}, 0, 0, context.Canceled
}
}
+ if tt.knowledgeDependenciesFunc != nil {
+ initializer.collectKnowledgeDependenciesFunc = tt.knowledgeDependenciesFunc
+ }
controller := &BasePipelineController[mockPipeline]{
Client: fakeClient,
@@ -496,11 +499,12 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
}
tests := []struct {
- name string
- knowledge *v1alpha1.Knowledge
- pipelines []v1alpha1.Pipeline
- schedulingDomain v1alpha1.SchedulingDomain
- expectReEvaluated []string
+ name string
+ knowledge *v1alpha1.Knowledge
+ knowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
+ pipelines []v1alpha1.Pipeline
+ schedulingDomain v1alpha1.SchedulingDomain
+ expectReEvaluated []string
}{
{
name: "knowledge change triggers dependent pipeline re-evaluation",
@@ -527,9 +531,6 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
@@ -544,14 +545,17 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "other-knowledge", Namespace: "default"},
- },
},
},
},
},
},
+ knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
+ if p.Name == "dependent-pipeline" {
+ return []string{"test-knowledge"}
+ }
+ return []string{"other-knowledge"}
+ },
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReEvaluated: []string{"dependent-pipeline"},
},
@@ -577,14 +581,14 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
},
},
+ knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
+ return []string{"test-knowledge"}
+ },
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReEvaluated: []string{},
},
@@ -654,13 +658,13 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
}
+ knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
+ return []string{"test-knowledge"}
+ }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -672,7 +676,8 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
},
Pipelines: make(map[string]mockPipeline),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
@@ -805,13 +810,13 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
}
+ knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
+ return []string{"test-knowledge"}
+ }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -823,7 +828,8 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
},
Pipelines: make(map[string]mockPipeline),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
@@ -873,13 +879,13 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
}
+ knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
+ return []string{"test-knowledge"}
+ }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -891,7 +897,8 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
+ collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
},
Pipelines: map[string]mockPipeline{
"test-pipeline": {name: "test-pipeline"},
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 2e361c1b3..433777085 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest] struct {
+type StepMonitor[RequestType PipelineRequest, StepType Step[RequestType]] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType]
+ Step StepType
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -43,19 +43,31 @@ type StepMonitor[RequestType PipelineRequest] struct {
stepImpactObserver *prometheus.HistogramVec
}
+type FilterMonitor[RequestType PipelineRequest, StepType Filter[RequestType]] struct {
+ *StepMonitor[RequestType, StepType]
+}
+
+type WeigherMonitor[RequestType PipelineRequest, StepType Weigher[RequestType]] struct {
+ *StepMonitor[RequestType, StepType]
+}
+
+func (w *WeigherMonitor[RequestType, StepType]) RequiredKnowledges() []string {
+ return w.Step.RequiredKnowledges()
+}
+
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest](
+func monitorStep[RequestType PipelineRequest, StepType Step[RequestType]](
_ context.Context,
_ client.Client,
step v1alpha1.StepSpec,
- impl Step[RequestType],
+ impl StepType,
m PipelineMonitor,
-) *StepMonitor[RequestType] {
+) *StepMonitor[RequestType, StepType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
@@ -67,7 +79,7 @@ func monitorStep[RequestType PipelineRequest](
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
WithLabelValues(m.PipelineName, step.Name)
}
- return &StepMonitor[RequestType]{
+ return &StepMonitor[RequestType, StepType]{
Step: impl,
stepName: step.Name,
pipelineName: m.PipelineName,
@@ -79,8 +91,30 @@ func monitorStep[RequestType PipelineRequest](
}
}
+func monitorFilter[RequestType PipelineRequest, StepType Filter[RequestType]](
+ ctx context.Context,
+ cl client.Client,
+ step v1alpha1.StepSpec,
+ impl StepType,
+ m PipelineMonitor,
+) *FilterMonitor[RequestType, StepType] {
+ stepMonitor := monitorStep[RequestType, StepType](ctx, cl, step, impl, m)
+ return &FilterMonitor[RequestType, StepType]{StepMonitor: stepMonitor}
+}
+
+func monitorWeigher[RequestType PipelineRequest, StepType Weigher[RequestType]](
+ ctx context.Context,
+ cl client.Client,
+ step v1alpha1.StepSpec,
+ impl StepType,
+ m PipelineMonitor,
+) *WeigherMonitor[RequestType, StepType] {
+ stepMonitor := monitorStep[RequestType, StepType](ctx, cl, step, impl, m)
+ return &WeigherMonitor[RequestType, StepType]{StepMonitor: stepMonitor}
+}
+
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index c248ec576..7172f8baa 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -21,7 +21,7 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest]{
+ monitor := &StepMonitor[mockPipelineRequest, *mockStep[mockPipelineRequest]]{
stepName: "mock_step",
Step: &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
From 7cbeb3d973bd799075a44103824114af4ce0f092 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 14:47:30 +0100
Subject: [PATCH 09/41] Revert "Ran into the wrong direction"
This reverts commit 1d424d752c0ab95e4b2eec5c34c636d6c7d07e65.
---
.../decisions/nova/pipeline_controller.go | 20 +----
.../nova/pipeline_controller_test.go | 6 +-
.../filters/filter_live_migratable_test.go | 12 +--
.../filter_requested_destination_test.go | 8 +-
.../decisions/pods/pipeline_controller.go | 2 +-
.../scheduling/descheduling/nova/monitor.go | 8 +-
.../descheduling/nova/monitor_test.go | 20 ++---
.../scheduling/descheduling/nova/pipeline.go | 28 ++-----
.../descheduling/nova/pipeline_controller.go | 31 +------
.../nova/pipeline_controller_test.go | 15 ++--
.../descheduling/nova/pipeline_test.go | 18 ++---
.../descheduling/nova/plugins/base.go | 6 +-
.../descheduling/nova/plugins/base_test.go | 2 +-
internal/scheduling/descheduling/nova/step.go | 7 +-
internal/scheduling/lib/pipeline.go | 44 +++-------
.../lib/pipeline_controller_test.go | 81 +++++++++----------
internal/scheduling/lib/step_monitor.go | 50 ++----------
internal/scheduling/lib/step_monitor_test.go | 2 +-
18 files changed, 103 insertions(+), 257 deletions(-)
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go
index 8abe91197..dfbd7a249 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller.go
@@ -151,7 +151,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[api.ExternalSchedulerRequest], int, int, error) {
+) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
return lib.NewFilterWeigherPipeline(
ctx, c.Client, p.Name,
@@ -161,24 +161,6 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
- dependencies := make(map[string]struct{})
- for _, weigherConf := range p.Spec.Weighers {
- weigher, ok := supportedWeighers[weigherConf.Name]
- if !ok {
- continue
- }
- for _, knowledgeName := range weigher().RequiredKnowledges() {
- dependencies[knowledgeName] = struct{}{}
- }
- }
- result := make([]string, 0, len(dependencies))
- for knowledgeName := range dependencies {
- result = append(result, knowledgeName)
- }
- return result
-}
-
func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainNova
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index fbcd1ad48..69ead2c8c 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -207,7 +207,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
if tt.pipeline != nil {
- pipeline, _, _, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
@@ -320,7 +320,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, _, _, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -672,7 +672,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
// Setup runtime pipeline if needed
if tt.pipeline != nil {
- pipeline, _, _, err := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
+ pipeline, err := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index b987e9c15..edb33c5c0 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -728,9 +728,7 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
- },
+ Client: fakeClient,
},
}
@@ -815,9 +813,7 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
- },
+ Client: fakeClient,
},
}
@@ -861,9 +857,7 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
step := &FilterLiveMigratableStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
- },
+ Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index 5952e4c3f..3ba008214 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -495,9 +495,7 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
step := &FilterRequestedDestinationStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
- },
+ Client: fakeClient,
},
}
@@ -578,9 +576,7 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
step := &FilterRequestedDestinationStep{
BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
- },
+ Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index ca386e15c..fbb8f84d0 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -197,7 +197,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[pods.PodPipelineRequest], int, int, error) {
+) (lib.Pipeline[pods.PodPipelineRequest], error) {
return lib.NewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index f9cf20b94..6fd248321 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -83,7 +83,7 @@ type StepMonitor struct {
}
// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
+func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -101,14 +101,10 @@ func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return m.step.Init(ctx, client, step)
}
-func (m StepMonitor) RequiredKnowledges() []string {
- return m.step.RequiredKnowledges()
-}
-
// Run the step and measure its execution time.
func (m StepMonitor) Run() ([]plugins.Decision, error) {
if m.runTimer != nil {
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index a61f0f404..ed7416848 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -80,7 +80,7 @@ type mockMonitorStep struct {
runCalled bool
}
-func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
m.initCalled = true
return m.initError
}
@@ -90,10 +90,6 @@ func (m *mockMonitorStep) Run() ([]plugins.Decision, error) {
return m.decisions, m.runError
}
-func (m *mockMonitorStep) RequiredKnowledges() []string {
- return []string{}
-}
-
func TestMonitorStep(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{
@@ -101,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -121,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -143,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -163,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -193,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -218,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -246,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index 6c782b1cd..08c26213d 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -14,7 +14,6 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/prometheus/client_golang/prometheus"
- "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -34,44 +33,27 @@ type Pipeline struct {
func (p *Pipeline) Init(
ctx context.Context,
- confedSteps []v1alpha1.StepSpec,
+ confedSteps []v1alpha1.DetectorSpec,
supportedSteps map[string]Step,
-) (ready int, total int, err error) {
+) error {
p.order = []string{}
// Load all steps from the configuration.
p.steps = make(map[string]Step, len(confedSteps))
- total = len(confedSteps)
- ready = 0
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
- return ready, total, errors.New("descheduler: unsupported step: " + stepConf.Name)
- }
- // Check if all knowledges this step requires are available.
- for _, knowledgeName := range step.RequiredKnowledges() {
- knowledge := &v1alpha1.Knowledge{}
- if err := p.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
- return ready, total, err
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return ready, total, errors.New("knowledge not ready: " + knowledgeName)
- }
- if knowledge.Status.RawLength == 0 {
- return ready, total, errors.New("knowledge has no data: " + knowledgeName)
- }
+ return errors.New("descheduler: unsupported step: " + stepConf.Name)
}
step = monitorStep(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
- return ready, total, err
+ return err
}
p.steps[stepConf.Name] = step
p.order = append(p.order, stepConf.Name)
slog.Info("descheduler: added step", "name", stepConf.Name)
- ready++
}
- return ready, total, nil
+ return nil
}
// Execute the descheduler steps in parallel and collect the decisions made by
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index ba4e84c5c..0cae5eaff 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -46,19 +46,14 @@ func (c *DeschedulingsPipelineController) PipelineType() v1alpha1.PipelineType {
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (
- pipeline *Pipeline,
- ready int,
- total int,
- err error,
-) {
- pipeline = &Pipeline{
+func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (*Pipeline, error) {
+ pipeline := &Pipeline{
Client: c.Client,
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- ready, total, err = pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
- return pipeline, ready, total, err
+ err := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
+ return pipeline, err
}
func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx context.Context) {
@@ -83,24 +78,6 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
}
}
-func (c *DeschedulingsPipelineController) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
- dependencies := make(map[string]struct{})
- for _, stepConf := range p.Spec.Detectors {
- step, ok := supportedSteps[stepConf.Name]
- if !ok {
- continue
- }
- for _, knowledgeName := range step.RequiredKnowledges() {
- dependencies[knowledgeName] = struct{}{}
- }
- }
- result := make([]string, 0, len(dependencies))
- for knowledgeName := range dependencies {
- result = append(result, knowledgeName)
- }
- return result
-}
-
func (c *DeschedulingsPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// This controller does not reconcile any resources directly.
return ctrl.Result{}, nil
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 83b0ce751..86a254d93 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -33,23 +33,20 @@ type mockControllerStep struct{}
func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
return nil, nil
}
-func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return nil
}
-func (m *mockControllerStep) RequiredKnowledges() []string {
- return []string{}
-}
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ steps []v1alpha1.DetectorSpec
expectError bool
expectedError string
}{
{
name: "successful pipeline initialization",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
Name: "mock-step",
},
@@ -58,7 +55,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
Name: "unsupported",
},
@@ -68,7 +65,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ steps: []v1alpha1.DetectorSpec{},
expectError: false,
},
}
@@ -84,7 +81,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
CycleDetector: controller.CycleDetector,
Monitor: controller.Monitor,
}
- _, _, err := pipeline.Init(t.Context(), tt.steps, map[string]Step{
+ err := pipeline.Init(t.Context(), tt.steps, map[string]Step{
"mock-step": &mockControllerStep{},
})
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index 55bedbb4e..d006f2b8f 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -30,7 +30,7 @@ func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
return m.decisions, nil
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
if m.initError != nil {
return m.initError
}
@@ -38,15 +38,11 @@ func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step
return nil
}
-func (m *mockPipelineStep) RequiredKnowledges() []string {
- return []string{}
-}
-
func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
supportedSteps map[string]Step
- confedSteps []v1alpha1.StepSpec
+ confedSteps []v1alpha1.DetectorSpec
expectedSteps int
expectedError bool
}{
@@ -55,7 +51,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "test-step",
}},
expectedSteps: 1,
@@ -65,7 +61,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "unsupported-step",
}},
expectedError: true,
@@ -75,7 +71,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "failing-step",
}},
expectedError: true,
@@ -86,7 +82,7 @@ func TestPipeline_Init(t *testing.T) {
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{
+ confedSteps: []v1alpha1.DetectorSpec{
{
Name: "step1",
},
@@ -102,7 +98,7 @@ func TestPipeline_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
pipeline := &Pipeline{}
- _, _, err := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
+ err := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
if tt.expectedError {
if err == nil {
t.Fatalf("expected error during initialization, got none")
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index af97f6609..c1b6ea902 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -21,7 +21,7 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
@@ -31,10 +31,6 @@ func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1
return nil
}
-func (s *Detector[Opts]) RequiredKnowledges() []string {
- return []string{}
-}
-
type Decision struct {
// Get the VM ID for which this decision applies.
VMID string
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index 524c69547..a0f581c0a 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -23,7 +23,7 @@ func (o MockOptions) Validate() error {
func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
- err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
+ err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
Opts: runtime.RawExtension{Raw: []byte(`{
"option1": "value1",
"option2": 2
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/step.go
index 838ef29c0..552edf87b 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/step.go
@@ -21,10 +21,5 @@ type Step interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
- // Descheduler steps can define knowledges they depend on, which should be
- // ready to be able to execute the step properly.
- // The returned slice contains the names of the knowledges which
- // can be found as kubernetes custom resources of kind Knowledge.
- RequiredKnowledges() []string
+ Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 4dcda80c6..37a1ad3f1 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -14,7 +14,6 @@ import (
"sync"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -43,27 +42,24 @@ type pipeline[RequestType PipelineRequest] struct {
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
- cl client.Client,
+ client client.Client,
name string,
supportedFilters map[string]func() Filter[RequestType],
confedFilters []v1alpha1.StepSpec,
supportedWeighers map[string]func() Weigher[RequestType],
confedWeighers []v1alpha1.StepSpec,
monitor PipelineMonitor,
-) (p Pipeline[RequestType], ready int, total int, err error) {
+) (Pipeline[RequestType], error) {
pipelineMonitor := monitor.SubPipeline(name)
// Ensure there are no overlaps between filter and weigher names.
for filterName := range supportedFilters {
if _, ok := supportedWeighers[filterName]; ok {
- return nil, ready, total, errors.New("step name overlap between filters and weighers: " + filterName)
+ return nil, errors.New("step name overlap between filters and weighers: " + filterName)
}
}
- total = len(confedFilters) + len(confedWeighers)
- ready = 0
-
// Load all filters from the configuration.
filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
filtersOrder := []string{}
@@ -72,17 +68,16 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "filters", maps.Keys(supportedFilters))
makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return nil, ready, total, errors.New("unsupported filter name: " + filterConfig.Name)
+ return nil, errors.New("unsupported filter name: " + filterConfig.Name)
}
filter := makeFilter()
- filter = monitorFilter(ctx, cl, filterConfig, filter, pipelineMonitor)
- if err := filter.Init(ctx, cl, filterConfig); err != nil {
- return nil, ready, total, errors.New("failed to initialize filter: " + err.Error())
+ filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
+ if err := filter.Init(ctx, client, filterConfig); err != nil {
+ return nil, errors.New("failed to initialize filter: " + err.Error())
}
filtersByName[filterConfig.Name] = filter
filtersOrder = append(filtersOrder, filterConfig.Name)
slog.Info("scheduler: added filter", "name", filterConfig.Name)
- ready++
}
// Load all weighers from the configuration.
@@ -93,32 +88,17 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
makeWeigher, ok := supportedWeighers[weigherConfig.Name]
if !ok {
- return nil, ready, total, errors.New("unsupported weigher name: " + weigherConfig.Name)
+ return nil, errors.New("unsupported weigher name: " + weigherConfig.Name)
}
weigher := makeWeigher()
- // Check if all knowledges this step requires are available.
- for _, knowledgeName := range weigher.RequiredKnowledges() {
- knowledge := &v1alpha1.Knowledge{}
- if err := cl.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
- return nil, ready, total, err
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return nil, ready, total, errors.New("knowledge not ready: " + knowledgeName)
- }
- if knowledge.Status.RawLength == 0 {
- return nil, ready, total, errors.New("knowledge has no data: " + knowledgeName)
- }
- }
weigher = validateWeigher(weigher)
- weigher = monitorWeigher(ctx, cl, weigherConfig, weigher, pipelineMonitor)
- if err := weigher.Init(ctx, cl, weigherConfig); err != nil {
- return nil, ready, total, errors.New("failed to initialize pipeline step: " + err.Error())
+ weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
+ if err := weigher.Init(ctx, client, weigherConfig); err != nil {
+ return nil, errors.New("failed to initialize pipeline step: " + err.Error())
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
- ready++
}
return &pipeline[RequestType]{
@@ -127,7 +107,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
weighersOrder: weighersOrder,
weighers: weighersByName,
monitor: pipelineMonitor,
- }, ready, total, nil
+ }, nil
}
// Execute filters and collect their activations by step name.
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 51561ff1c..76dd08e53 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -7,6 +7,7 @@ import (
"context"
"testing"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -192,14 +193,13 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
}
tests := []struct {
- name string
- pipeline *v1alpha1.Pipeline
- knowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
- knowledges []v1alpha1.Knowledge
- schedulingDomain v1alpha1.SchedulingDomain
- initPipelineError bool
- expectReady bool
- expectInMap bool
+ name string
+ pipeline *v1alpha1.Pipeline
+ knowledges []v1alpha1.Knowledge
+ schedulingDomain v1alpha1.SchedulingDomain
+ initPipelineError bool
+ expectReady bool
+ expectInMap bool
}{
{
name: "pipeline with all steps ready",
@@ -252,13 +252,13 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "missing-knowledge", Namespace: "default"},
+ },
},
},
},
},
- knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
- return []string{"missing-knowledge"}
- },
knowledges: []v1alpha1.Knowledge{},
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReady: true,
@@ -323,9 +323,6 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
return mockPipeline{}, 0, 0, context.Canceled
}
}
- if tt.knowledgeDependenciesFunc != nil {
- initializer.collectKnowledgeDependenciesFunc = tt.knowledgeDependenciesFunc
- }
controller := &BasePipelineController[mockPipeline]{
Client: fakeClient,
@@ -499,12 +496,11 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
}
tests := []struct {
- name string
- knowledge *v1alpha1.Knowledge
- knowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
- pipelines []v1alpha1.Pipeline
- schedulingDomain v1alpha1.SchedulingDomain
- expectReEvaluated []string
+ name string
+ knowledge *v1alpha1.Knowledge
+ pipelines []v1alpha1.Pipeline
+ schedulingDomain v1alpha1.SchedulingDomain
+ expectReEvaluated []string
}{
{
name: "knowledge change triggers dependent pipeline re-evaluation",
@@ -531,6 +527,9 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "test-knowledge", Namespace: "default"},
+ },
},
},
},
@@ -545,17 +544,14 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "other-knowledge", Namespace: "default"},
+ },
},
},
},
},
},
- knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
- if p.Name == "dependent-pipeline" {
- return []string{"test-knowledge"}
- }
- return []string{"other-knowledge"}
- },
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReEvaluated: []string{"dependent-pipeline"},
},
@@ -581,14 +577,14 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "test-knowledge", Namespace: "default"},
+ },
},
},
},
},
},
- knowledgeDependenciesFunc: func(p v1alpha1.Pipeline) []string {
- return []string{"test-knowledge"}
- },
schedulingDomain: v1alpha1.SchedulingDomainNova,
expectReEvaluated: []string{},
},
@@ -658,13 +654,13 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "test-knowledge", Namespace: "default"},
+ },
},
},
},
}
- knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
- return []string{"test-knowledge"}
- }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -676,8 +672,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
- collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
},
Pipelines: make(map[string]mockPipeline),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
@@ -810,13 +805,13 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "test-knowledge", Namespace: "default"},
+ },
},
},
},
}
- knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
- return []string{"test-knowledge"}
- }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -828,8 +823,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
- collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
},
Pipelines: make(map[string]mockPipeline),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
@@ -879,13 +873,13 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
+ Knowledges: []corev1.ObjectReference{
+ {Name: "test-knowledge", Namespace: "default"},
+ },
},
},
},
}
- knowledgeDependenciesFunc := func(p v1alpha1.Pipeline) []string {
- return []string{"test-knowledge"}
- }
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
@@ -897,8 +891,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Client: fakeClient,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Initializer: &mockPipelineInitializer{
- pipelineType: v1alpha1.PipelineTypeFilterWeigher,
- collectKnowledgeDependenciesFunc: knowledgeDependenciesFunc,
+ pipelineType: v1alpha1.PipelineTypeFilterWeigher,
},
Pipelines: map[string]mockPipeline{
"test-pipeline": {name: "test-pipeline"},
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 433777085..2e361c1b3 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest, StepType Step[RequestType]] struct {
+type StepMonitor[RequestType PipelineRequest] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest, StepType Step[RequestType]] struct
stepName string
// The wrapped scheduler step to monitor.
- Step StepType
+ Step Step[RequestType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -43,31 +43,19 @@ type StepMonitor[RequestType PipelineRequest, StepType Step[RequestType]] struct
stepImpactObserver *prometheus.HistogramVec
}
-type FilterMonitor[RequestType PipelineRequest, StepType Filter[RequestType]] struct {
- *StepMonitor[RequestType, StepType]
-}
-
-type WeigherMonitor[RequestType PipelineRequest, StepType Weigher[RequestType]] struct {
- *StepMonitor[RequestType, StepType]
-}
-
-func (w *WeigherMonitor[RequestType, StepType]) RequiredKnowledges() []string {
- return w.Step.RequiredKnowledges()
-}
-
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest, StepType Step[RequestType]](
+func monitorStep[RequestType PipelineRequest](
_ context.Context,
_ client.Client,
step v1alpha1.StepSpec,
- impl StepType,
+ impl Step[RequestType],
m PipelineMonitor,
-) *StepMonitor[RequestType, StepType] {
+) *StepMonitor[RequestType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
@@ -79,7 +67,7 @@ func monitorStep[RequestType PipelineRequest, StepType Step[RequestType]](
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
WithLabelValues(m.PipelineName, step.Name)
}
- return &StepMonitor[RequestType, StepType]{
+ return &StepMonitor[RequestType]{
Step: impl,
stepName: step.Name,
pipelineName: m.PipelineName,
@@ -91,30 +79,8 @@ func monitorStep[RequestType PipelineRequest, StepType Step[RequestType]](
}
}
-func monitorFilter[RequestType PipelineRequest, StepType Filter[RequestType]](
- ctx context.Context,
- cl client.Client,
- step v1alpha1.StepSpec,
- impl StepType,
- m PipelineMonitor,
-) *FilterMonitor[RequestType, StepType] {
- stepMonitor := monitorStep[RequestType, StepType](ctx, cl, step, impl, m)
- return &FilterMonitor[RequestType, StepType]{StepMonitor: stepMonitor}
-}
-
-func monitorWeigher[RequestType PipelineRequest, StepType Weigher[RequestType]](
- ctx context.Context,
- cl client.Client,
- step v1alpha1.StepSpec,
- impl StepType,
- m PipelineMonitor,
-) *WeigherMonitor[RequestType, StepType] {
- stepMonitor := monitorStep[RequestType, StepType](ctx, cl, step, impl, m)
- return &WeigherMonitor[RequestType, StepType]{StepMonitor: stepMonitor}
-}
-
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index 7172f8baa..c248ec576 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -21,7 +21,7 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest, *mockStep[mockPipelineRequest]]{
+ monitor := &StepMonitor[mockPipelineRequest]{
stepName: "mock_step",
Step: &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
From 75f45e038cb8a52bb15b46a4cfe66373440d97c3 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Thu, 22 Jan 2026 14:47:43 +0100
Subject: [PATCH 10/41] Revert "WIP"
This reverts commit 7f7b5a8b5f9446a3a3fdafe2b118ea2c798f6f98.
---
api/v1alpha1/pipeline_types.go | 89 ++++++-
api/v1alpha1/zz_generated.deepcopy.go | 56 ++++-
config/crd/bases/cortex.cloud_pipelines.yaml | 123 +++++++++-
config/crd/cortex.cloud_pipelines.yaml | 123 +++++++++-
.../templates/crd/cortex.cloud_pipelines.yaml | 123 +++++++++-
.../cinder/pipeline_controller_test.go | 36 +--
.../scheduling/decisions/machines/noop.go | 2 +-
.../machines/pipeline_controller_test.go | 24 +-
.../manila/pipeline_controller_test.go | 32 +--
.../nova/pipeline_controller_test.go | 60 ++---
.../pods/pipeline_controller_test.go | 24 +-
.../plugins/filters/filter_node_affinity.go | 2 +-
.../plugins/filters/filter_node_available.go | 2 +-
.../plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_noop.go | 2 +-
.../pods/plugins/filters/filter_taint.go | 2 +-
internal/scheduling/lib/pipeline.go | 13 +-
.../scheduling/lib/pipeline_controller.go | 123 ++++++----
.../lib/pipeline_controller_test.go | 221 ++++++++++++++----
internal/scheduling/lib/pipeline_test.go | 8 +-
internal/scheduling/lib/step.go | 67 ++----
internal/scheduling/lib/step_monitor.go | 24 +-
internal/scheduling/lib/step_monitor_test.go | 6 +-
internal/scheduling/lib/step_test.go | 11 +-
internal/scheduling/lib/weigher_validation.go | 7 +-
.../scheduling/lib/weigher_validation_test.go | 6 +-
26 files changed, 904 insertions(+), 284 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index ee2a8285d..449ec0309 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -4,16 +4,84 @@
package v1alpha1
import (
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
-type StepSpec struct {
+// Step as part of a cortex pipeline.
+type Step interface {
+ // Every step must have options so the pipeline can configure it.
+ GetOpts() runtime.RawExtension
+ // Every step must have a name so the pipeline can identify it.
+ GetName() string
+ // Every step can have an optional description.
+ GetDescription() string
+}
+
+// Filters remove host candidates from an initial set, leaving
+// valid candidates. Filters are run before weighers are applied, as
+// part of a filter-weigher scheduling pipeline.
+type FilterSpec struct {
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the extractor that can be used
+ // +kubebuilder:validation:Optional
+ Opts runtime.RawExtension `json:"opts,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+
+ // Filters are not allowed to depend on knowledges, as knowledges can
+ // be outdated leading to invalid filtering decisions.
+}
+
+func (f FilterSpec) GetOpts() runtime.RawExtension { return f.Opts }
+func (f FilterSpec) GetName() string { return f.Name }
+func (f FilterSpec) GetDescription() string { return f.Description }
+
+// Weighers assign weights to the remaining host candidates after filtering,
+// making some hosts more preferable than others. Weighers are run
+// after filters are applied, as part of a filter-weigher scheduling pipeline.
+type WeigherSpec struct {
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the extractor that can be used
+ // +kubebuilder:validation:Optional
+ Opts runtime.RawExtension `json:"opts,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+
+ // Knowledges this step depends on to be ready.
+ //
+ // Weighers can depend on knowledges as they don't break valid placements,
+ // they only make it more optimal.
+ // +kubebuilder:validation:Optional
+ Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
+}
+
+func (w WeigherSpec) GetOpts() runtime.RawExtension { return w.Opts }
+func (w WeigherSpec) GetName() string { return w.Name }
+func (w WeigherSpec) GetDescription() string { return w.Description }
+
+// Detectors find candidates for descheduling (migration off current host).
+// These detectors are run after weighers are applied, as part of a
+// descheduler scheduling pipeline.
+type DetectorSpec struct {
// The name of the scheduler step in the cortex implementation.
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
- // Additional configuration for the step.
+ // Additional configuration for the extractor that can be used
// +kubebuilder:validation:Optional
Opts runtime.RawExtension `json:"opts,omitempty"`
@@ -21,8 +89,19 @@ type StepSpec struct {
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
+
+ // Knowledges this step depends on to be ready.
+ //
+ // Detectors can depend on knowledges as they don't ensure valid placements
+ // and therefore are not on the critical path.
+ // +kubebuilder:validation:Optional
+ Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
+func (d DetectorSpec) GetOpts() runtime.RawExtension { return d.Opts }
+func (d DetectorSpec) GetName() string { return d.Name }
+func (d DetectorSpec) GetDescription() string { return d.Description }
+
type PipelineType string
const (
@@ -65,14 +144,14 @@ type PipelineSpec struct {
// Filters remove host candidates from an initial set, leaving
// valid candidates. Filters are run before weighers are applied.
// +kubebuilder:validation:Optional
- Filters []StepSpec `json:"filters,omitempty"`
+ Filters []FilterSpec `json:"filters,omitempty"`
// Ordered list of weighers to apply in a scheduling pipeline.
//
// This attribute is set only if the pipeline type is filter-weigher.
// These weighers are run after filters are applied.
// +kubebuilder:validation:Optional
- Weighers []StepSpec `json:"weighers,omitempty"`
+ Weighers []WeigherSpec `json:"weighers,omitempty"`
// Ordered list of detectors to apply in a descheduling pipeline.
//
@@ -80,7 +159,7 @@ type PipelineSpec struct {
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied.
// +kubebuilder:validation:Optional
- Detectors []StepSpec `json:"detectors,omitempty"`
+ Detectors []DetectorSpec `json:"detectors,omitempty"`
}
const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index ae02b8da2..785455a08 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -425,6 +425,43 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
+ *out = *in
+ in.Opts.DeepCopyInto(&out.Opts)
+ if in.Knowledges != nil {
+ in, out := &in.Knowledges, &out.Knowledges
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec.
+func (in *DetectorSpec) DeepCopy() *DetectorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DetectorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
+ *out = *in
+ in.Opts.DeepCopyInto(&out.Opts)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
+func (in *FilterSpec) DeepCopy() *FilterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(FilterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityDatasource) DeepCopyInto(out *IdentityDatasource) {
*out = *in
@@ -842,21 +879,21 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
- *out = make([]StepSpec, len(*in))
+ *out = make([]FilterSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Weighers != nil {
in, out := &in.Weighers, &out.Weighers
- *out = make([]StepSpec, len(*in))
+ *out = make([]WeigherSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Detectors != nil {
in, out := &in.Detectors, &out.Detectors
- *out = make([]StepSpec, len(*in))
+ *out = make([]DetectorSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1098,17 +1135,22 @@ func (in *StepResult) DeepCopy() *StepResult {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepSpec) DeepCopyInto(out *StepSpec) {
+func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
*out = *in
in.Opts.DeepCopyInto(&out.Opts)
+ if in.Knowledges != nil {
+ in, out := &in.Knowledges, &out.Knowledges
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
-func (in *StepSpec) DeepCopy() *StepSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherSpec.
+func (in *WeigherSpec) DeepCopy() *WeigherSpec {
if in == nil {
return nil
}
- out := new(StepSpec)
+ out := new(WeigherSpec)
in.DeepCopyInto(out)
return out
}
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 38c5ebc48..794acf0b1 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -73,19 +73,75 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -100,6 +156,10 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -112,7 +172,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -144,19 +205,75 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+ they only make it more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 38c5ebc48..794acf0b1 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -73,19 +73,75 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -100,6 +156,10 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -112,7 +172,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -144,19 +205,75 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+ they only make it more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 01aa46f5c..679bab0fb 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -79,19 +79,75 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
+ description: |-
+ Detectors find candidates for descheduling (migration off current host).
+ These detectors are run after weighers are applied, as part of a
+ descheduler scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Detectors can depend on knowledges as they don't ensure valid placements
+ and therefore are not on the critical path.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -106,6 +162,10 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
+ description: |-
+ Filters remove host candidates from an initial set, leaving
+ valid candidates. Filters are run before weighers are applied, as
+ part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -118,7 +178,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -150,19 +211,75 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
+ description: |-
+ Weighers assign weights to the remaining host candidates after filtering,
+ making some hosts more preferable than others. Weighers are run
+ after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ Knowledges this step depends on to be ready.
+
+ Weighers can depend on knowledges as they don't break valid placements,
+ they only make it more optimal.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the step.
+ description: Additional configuration for the extractor that
+ can be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 7121f5166..2b7cda1bf 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -175,8 +175,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
})
if err != nil {
@@ -284,8 +284,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -318,8 +318,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -372,8 +372,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -476,24 +476,24 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "test-plugin",
},
},
- weighers: []v1alpha1.StepSpec{
+ weighers: []v1alpha1.WeigherSpec{
{
Name: "test-plugin",
},
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index 3b0104aa6..6dfa7911b 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -15,7 +15,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index c218486bb..157b8ac13 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -211,26 +211,26 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{Name: "noop"},
},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{Name: "unsupported"},
},
expectError: true,
@@ -315,8 +315,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -349,8 +349,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -396,8 +396,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index ea8e20c14..d280f0e05 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -279,8 +279,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -313,8 +313,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -367,8 +367,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -471,19 +471,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "supported netapp step",
- weighers: []v1alpha1.StepSpec{
+ weighers: []v1alpha1.WeigherSpec{
{
Name: "netapp_cpu_usage_balancing",
Opts: runtime.RawExtension{
@@ -495,7 +495,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported-plugin",
},
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 69ead2c8c..b02face53 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -92,8 +92,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -121,8 +121,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -173,8 +173,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -264,19 +264,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "supported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
},
@@ -285,7 +285,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported-plugin",
},
@@ -294,7 +294,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with scoping options",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -306,7 +306,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with invalid scoping options",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -418,8 +418,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -430,8 +430,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -466,8 +466,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -478,8 +478,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -538,8 +538,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -550,8 +550,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -588,8 +588,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -626,8 +626,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 4e6a6f249..4d93a1720 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -186,19 +186,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "noop",
},
@@ -207,7 +207,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported",
},
@@ -292,8 +292,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -326,8 +326,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -373,8 +373,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index 265bffa24..acacc6ea6 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -19,7 +19,7 @@ type NodeAffinityFilter struct {
Alias string
}
-func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index 45ae98067..c668e5f0a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -18,7 +18,7 @@ type NodeAvailableFilter struct {
Alias string
}
-func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 44d185580..70e897b6a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -19,7 +19,7 @@ type NodeCapacityFilter struct {
Alias string
}
-func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 3cd328a50..08fbf1cd4 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -18,7 +18,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 82135b161..697c41466 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -18,7 +18,7 @@ type TaintFilter struct {
Alias string
}
-func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 37a1ad3f1..82f3ef528 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -27,7 +27,7 @@ type pipeline[RequestType PipelineRequest] struct {
// The activation function to use when combining the
// results of the scheduler steps.
ActivationFunction
- // The order in which filters are executed, by their step name.
+ // The order in which filters are applied, by their step name.
filtersOrder []string
// The filters by their name.
filters map[string]Filter[RequestType]
@@ -39,15 +39,22 @@ type pipeline[RequestType PipelineRequest] struct {
monitor PipelineMonitor
}
+type StepWrapper[RequestType PipelineRequest, StepType v1alpha1.Step] func(
+ ctx context.Context,
+ client client.Client,
+ step StepType,
+ impl Step[RequestType, StepType],
+) (Step[RequestType, StepType], error)
+
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
supportedFilters map[string]func() Filter[RequestType],
- confedFilters []v1alpha1.StepSpec,
+ confedFilters []v1alpha1.FilterSpec,
supportedWeighers map[string]func() Weigher[RequestType],
- confedWeighers []v1alpha1.StepSpec,
+ confedWeighers []v1alpha1.WeigherSpec,
monitor PipelineMonitor,
) (Pipeline[RequestType], error) {
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index d8028313c..50e3af497 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
@@ -26,27 +27,7 @@ type PipelineInitializer[PipelineType any] interface {
// This method is delegated to the parent controller, when a pipeline needs
// to be newly initialized or re-initialized to update it in the pipeline
// map.
- //
- // Ready and total indicate how many steps are ready out of the total
- // configured steps. Sometimes, steps may be unready but this does not
- // prevent the pipeline from being created, e.g., when weighers depend
- // on knowledges that are not yet ready.
- //
- // If there was an error that blocks us from creating the pipeline, return
- // a non-nil error.
- InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (
- pipeline PipelineType,
- ready int,
- total int,
- err error,
- )
-
- // Collect knowledge dependencies for the given pipeline.
- //
- // This is used to determine which pipelines depend on which knowledges
- // when a knowledge changes.
- CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string
-
+ InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (PipelineType, error)
// Get the accepted pipeline type for this controller.
//
// This is used to filter pipelines when listing existing pipelines on
@@ -108,10 +89,28 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
log := ctrl.LoggerFrom(ctx)
old := obj.DeepCopy()
- var err error
- c.Pipelines[obj.Name], obj.Status.ReadySteps, obj.Status.TotalSteps, err = c.
- Initializer.InitPipeline(ctx, *obj)
+	// Count how many configured steps are ready: weighers and detectors are
+	// ready only when all of their knowledge dependencies are ready.
+ obj.Status.TotalSteps = len(obj.Spec.Filters) + len(obj.Spec.Weighers) + len(obj.Spec.Detectors)
+ obj.Status.ReadySteps = 0
+ for range obj.Spec.Filters { // Could use len() directly but want to keep the pattern.
+ // If needed, check if this filter needs any dependencies. For now,
+ // as filters do not depend on knowledges, we skip this.
+ obj.Status.ReadySteps++
+ }
+ for _, detector := range obj.Spec.Detectors {
+ if err := c.checkAllKnowledgesReady(ctx, detector.Knowledges); err == nil {
+ obj.Status.ReadySteps++
+ }
+ }
+ for _, weigher := range obj.Spec.Weighers {
+ if err := c.checkAllKnowledgesReady(ctx, weigher.Knowledges); err == nil {
+ obj.Status.ReadySteps++
+ }
+ }
obj.Status.StepsReadyFrac = fmt.Sprintf("%d/%d", obj.Status.ReadySteps, obj.Status.TotalSteps)
+
+ var err error
+ c.Pipelines[obj.Name], err = c.Initializer.InitPipeline(ctx, *obj)
c.PipelineConfigs[obj.Name] = *obj
if err != nil {
log.Error(err, "failed to create pipeline", "pipelineName", obj.Name)
@@ -185,6 +184,45 @@ func (c *BasePipelineController[PipelineType]) HandlePipelineDeleted(
delete(c.PipelineConfigs, pipelineConf.Name)
}
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (c *BasePipelineController[PipelineType]) checkAllKnowledgesReady(
+ ctx context.Context,
+ objects []corev1.ObjectReference,
+) error {
+
+ log := ctrl.LoggerFrom(ctx)
+ // Check the status of all knowledges depending on this step.
+ readyKnowledges := 0
+ totalKnowledges := len(objects)
+ for _, objRef := range objects {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := c.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ log.Error(err, "failed to get knowledge depending on step", "knowledgeName", objRef.Name)
+ continue
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ log.Info("knowledge not ready due to error condition", "knowledgeName", objRef.Name)
+ continue
+ }
+ if knowledge.Status.RawLength == 0 {
+ log.Info("knowledge not ready, no data available", "knowledgeName", objRef.Name)
+ continue
+ }
+ readyKnowledges++
+ }
+ if readyKnowledges != totalKnowledges {
+ return fmt.Errorf(
+ "%d/%d knowledges ready",
+ readyKnowledges, totalKnowledges,
+ )
+ }
+ return nil
+}
+
// Handle a knowledge creation, update, or delete event from watching knowledge resources.
func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
ctx context.Context,
@@ -196,32 +234,37 @@ func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
return
}
log := ctrl.LoggerFrom(ctx)
- log.Info("knowledge changed readiness/availability, re-evaluating pipelines", "knowledgeName", obj.Name)
+ log.Info("knowledge changed, re-evaluating dependent pipelines", "knowledgeName", obj.Name)
+ // Find all pipelines depending on this knowledge and re-evaluate them.
var pipelines v1alpha1.PipelineList
if err := c.List(ctx, &pipelines); err != nil {
log.Error(err, "failed to list pipelines for knowledge", "knowledgeName", obj.Name)
return
}
for _, pipeline := range pipelines.Items {
- if pipeline.Spec.SchedulingDomain != c.SchedulingDomain {
- continue
- }
- if pipeline.Spec.Type != c.Initializer.PipelineType() {
- continue
+ needsUpdate := false
+ // For filter-weigher pipelines, only weighers may depend on knowledges.
+ for _, step := range pipeline.Spec.Weighers {
+ for _, knowledgeRef := range step.Knowledges {
+ if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
+ needsUpdate = true
+ break
+ }
+ }
}
- knowledgeDeps := c.Initializer.CollectKnowledgeDependencies(pipeline)
- found := false
- for _, knowledgeName := range knowledgeDeps {
- if knowledgeName == obj.Name {
- found = true
- break
+ // Check descheduler pipelines where detectors may depend on knowledges.
+ for _, step := range pipeline.Spec.Detectors {
+ for _, knowledgeRef := range step.Knowledges {
+ if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
+ needsUpdate = true
+ break
+ }
}
}
- if !found {
- continue
+ if needsUpdate {
+ log.Info("re-evaluating pipeline due to knowledge change", "pipelineName", pipeline.Name)
+ c.handlePipelineChange(ctx, &pipeline, queue)
}
- log.Info("re-evaluating pipeline due to knowledge change", "pipelineName", pipeline.Name, "knowledgeName", obj.Name)
- c.handlePipelineChange(ctx, &pipeline, queue)
}
}
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 76dd08e53..7a85999b6 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -25,23 +25,15 @@ type mockPipeline struct {
// Mock PipelineInitializer for testing
type mockPipelineInitializer struct {
- pipelineType v1alpha1.PipelineType
- initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error)
- collectKnowledgeDependenciesFunc func(p v1alpha1.Pipeline) []string
+ pipelineType v1alpha1.PipelineType
+ initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error)
}
-func (m *mockPipelineInitializer) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error) {
+func (m *mockPipelineInitializer) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
if m.initPipelineFunc != nil {
return m.initPipelineFunc(ctx, p)
}
- return mockPipeline{name: p.Name}, 0, 0, nil
-}
-
-func (m *mockPipelineInitializer) CollectKnowledgeDependencies(p v1alpha1.Pipeline) []string {
- if m.collectKnowledgeDependenciesFunc != nil {
- return m.collectKnowledgeDependenciesFunc(p)
- }
- return nil
+ return mockPipeline{name: p.Name}, nil
}
func (m *mockPipelineInitializer) PipelineType() v1alpha1.PipelineType {
@@ -80,8 +72,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -100,8 +92,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -111,8 +103,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -122,8 +114,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeDescheduler,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -133,8 +125,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -210,12 +202,12 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{
+ Filters: []v1alpha1.FilterSpec{
{
Name: "test-filter",
},
},
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -249,7 +241,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -273,7 +265,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -291,7 +283,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -319,8 +311,8 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
}
if tt.initPipelineError {
- initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, int, int, error) {
- return mockPipeline{}, 0, 0, context.Canceled
+ initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
+ return mockPipeline{}, context.Canceled
}
}
@@ -369,8 +361,8 @@ func TestBasePipelineController_HandlePipelineCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -414,8 +406,8 @@ func TestBasePipelineController_HandlePipelineUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -489,6 +481,157 @@ func TestBasePipelineController_HandlePipelineDeleted(t *testing.T) {
}
}
+func TestBasePipelineController_checkAllKnowledgesReady(t *testing.T) {
+ scheme := runtime.NewScheme()
+ if err := v1alpha1.AddToScheme(scheme); err != nil {
+ t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
+ }
+
+ tests := []struct {
+ name string
+ knowledges []v1alpha1.Knowledge
+ expectError bool
+ }{
+ {
+ name: "no knowledges",
+ knowledges: []v1alpha1.Knowledge{},
+ expectError: false,
+ },
+ {
+ name: "ready knowledge",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "ready-knowledge",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ },
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "knowledge in error state",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "error-knowledge",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionFalse,
+ },
+ },
+ },
+ },
+ },
+ expectError: true,
+ },
+ {
+ name: "knowledge with no data",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "no-data-knowledge",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 0,
+ },
+ },
+ },
+ expectError: true,
+ },
+ {
+ name: "multiple knowledges, all ready",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge-1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge-2",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 5,
+ },
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "multiple knowledges, some not ready",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "ready-knowledge",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "not-ready-knowledge",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 0,
+ },
+ },
+ },
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ objects := make([]client.Object, len(tt.knowledges))
+ for i := range tt.knowledges {
+ objects[i] = &tt.knowledges[i]
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objects...).
+ Build()
+
+ controller := &BasePipelineController[mockPipeline]{
+ Client: fakeClient,
+ }
+
+ objectReferences := make([]corev1.ObjectReference, len(tt.knowledges))
+ for i, k := range tt.knowledges {
+ objectReferences[i] = corev1.ObjectReference{
+ Name: k.Name,
+ Namespace: k.Namespace,
+ }
+ }
+ err := controller.checkAllKnowledgesReady(context.Background(), objectReferences)
+
+ if tt.expectError && err == nil {
+ t.Error("Expected error but got none")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("Expected no error but got: %v", err)
+ }
+ })
+ }
+}
+
func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
@@ -524,7 +667,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -541,7 +684,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -574,7 +717,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -651,7 +794,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -802,7 +945,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -870,7 +1013,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index 95be1919e..dcb1f4e02 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -18,7 +18,7 @@ type mockFilter struct {
name string
}
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
@@ -36,7 +36,7 @@ type mockWeigher struct {
name string
}
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
return nil
}
@@ -49,10 +49,6 @@ func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*
}, nil
}
-func (m *mockWeigher) RequiredKnowledges() []string {
- return []string{}
-}
-
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &pipeline[mockPipelineRequest]{
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index d7cd4e9e9..caac1ebf7 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -10,7 +10,6 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/pkg/conf"
- "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -31,9 +30,9 @@ type EmptyStepOpts struct{}
func (EmptyStepOpts) Validate() error { return nil }
// Interface for a scheduler step.
-type Step[RequestType PipelineRequest] interface {
+type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
// Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+ Init(ctx context.Context, client client.Client, step StepType) error
// Run this step of the scheduling pipeline.
//
@@ -53,23 +52,15 @@ type Step[RequestType PipelineRequest] interface {
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-// Step that acts as a filter in the scheduling pipeline.
-type Filter[RequestType PipelineRequest] = Step[RequestType]
-
// Step that acts as a weigher in the scheduling pipeline.
-type Weigher[RequestType PipelineRequest] interface {
- Step[RequestType]
-
- // Weighers can define knowledges they depend on, which should be
- // ready to be able to execute the weigher properly.
- // The returned slice contains the names of the knowledges which
- // can be found as kubernetes custom resources of kind Knowledge.
- RequiredKnowledges() []string
-}
+type Weigher[RequestType PipelineRequest] = Step[RequestType, v1alpha1.WeigherSpec]
+
+// Step that acts as a filter in the scheduling pipeline.
+type Filter[RequestType PipelineRequest] = Step[RequestType, v1alpha1.FilterSpec]
// Common base for all steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
+type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The activation function to use.
@@ -78,24 +69,17 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
Client client.Client
}
-// Common base implementation of a filter step.
-// Functionally identical to BaseStep, but used for clarity.
-type BaseFilter[RequestType PipelineRequest, Opts StepOpts] struct {
- BaseStep[RequestType, Opts]
-}
-
// Common base implementation of a weigher step.
// Functionally identical to BaseStep, but used for clarity.
-type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] struct {
- BaseStep[RequestType, Opts]
-}
+type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.WeigherSpec]
-// Override to specify required knowledges for this weigher.
-func (s *BaseWeigher[RequestType, Opts]) RequiredKnowledges() []string { return []string{} }
+// Common base implementation of a filter step.
+// Functionally identical to BaseStep, but used for clarity.
+type BaseFilter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.FilterSpec]
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- opts := conf.NewRawOptsBytes(step.Opts.Raw)
+func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
+ opts := conf.NewRawOptsBytes(step.GetOpts().Raw)
if err := s.Load(opts); err != nil {
return err
}
@@ -107,30 +91,9 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl
return nil
}
-// Weighers need to check if all dependency knowledges are available.
-func (s *BaseWeigher[RequestType, Opts]) Init(ctx context.Context, c client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, c, step); err != nil {
- return err
- }
- for _, knowledgeName := range s.RequiredKnowledges() {
- knowledge := &v1alpha1.Knowledge{}
- if err := c.Get(ctx, client.ObjectKey{Name: knowledgeName}, knowledge); err != nil {
- return err
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return errors.New("knowledge not ready: " + knowledgeName)
- }
- if knowledge.Status.RawLength == 0 {
- return errors.New("knowledge has no data: " + knowledgeName)
- }
- }
- return nil
-}
-
// Get a default result (no action) for the input weight keys given in the request.
// Use this to initialize the result before applying filtering/weighing logic.
-func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
+func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
activations := make(map[string]float64)
for _, subject := range request.GetSubjects() {
activations[subject] = s.NoEffect()
@@ -140,7 +103,7 @@ func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request Request
}
// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
+func (s *BaseStep[RequestType, Opts, StepType]) PrepareStats(request RequestType, unit string) StepStatistics {
return StepStatistics{
Unit: unit,
Subjects: make(map[string]float64, len(request.GetSubjects())),
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 2e361c1b3..42601a0bb 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest] struct {
+type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType]
+ Step Step[RequestType, StepType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -44,32 +44,32 @@ type StepMonitor[RequestType PipelineRequest] struct {
}
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest](
+func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
_ context.Context,
_ client.Client,
- step v1alpha1.StepSpec,
- impl Step[RequestType],
+ step StepType,
+ impl Step[RequestType, StepType],
m PipelineMonitor,
-) *StepMonitor[RequestType] {
+) *StepMonitor[RequestType, StepType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, step.GetName())
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, step.GetName())
}
- return &StepMonitor[RequestType]{
+ return &StepMonitor[RequestType, StepType]{
Step: impl,
- stepName: step.Name,
+ stepName: step.GetName(),
pipelineName: m.PipelineName,
runTimer: runTimer,
stepSubjectWeight: m.stepSubjectWeight,
@@ -80,7 +80,7 @@ func monitorStep[RequestType PipelineRequest](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index c248ec576..fea4e04e7 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -7,6 +7,8 @@ import (
"log/slog"
"os"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
type mockObserver struct {
@@ -21,9 +23,9 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest]{
+ monitor := &StepMonitor[mockPipelineRequest, v1alpha1.WeigherSpec]{
stepName: "mock_step",
- Step: &mockStep[mockPipelineRequest]{
+ Step: &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index 8d826bc7b..a1940355f 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -11,20 +11,17 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest] struct {
- InitFunc func(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+type mockStep[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
+ InitFunc func(ctx context.Context, client client.Client, step StepType) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockStep[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockStep[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
-func (m *mockStep[RequestType]) RequiredKnowledges() []string {
- return []string{}
-}
type MockOptions struct {
Option1 string `json:"option1"`
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index e2a594de8..a86e19ec5 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -19,7 +19,7 @@ type WeigherValidator[RequestType PipelineRequest] struct {
}
// Initialize the wrapped weigher with the database and options.
-func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
slog.Info("scheduler: init validation for step", "name", step.Name)
return s.Weigher.Init(ctx, client, step)
}
@@ -52,8 +52,3 @@ func (s *WeigherValidator[RequestType]) Run(traceLog *slog.Logger, request Reque
}
return result, nil
}
-
-// Return the required knowledges for this weigher.
-func (s *WeigherValidator[RequestType]) RequiredKnowledges() []string {
- return s.Weigher.RequiredKnowledges()
-}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index aa6cba851..b97796c3f 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -7,10 +7,12 @@ import (
"log/slog"
"reflect"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
@@ -45,7 +47,7 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
From 9c687069d7b1f046175134e17d31b382eaa35e8d Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Fri, 23 Jan 2026 08:46:49 +0100
Subject: [PATCH 11/41] Simplify spec again
---
api/v1alpha1/pipeline_types.go | 88 ++-----------------
api/v1alpha1/zz_generated.deepcopy.go | 51 ++---------
config/crd/bases/cortex.cloud_pipelines.yaml | 88 +++++++++++++------
config/crd/cortex.cloud_pipelines.yaml | 88 +++++++++++++------
.../templates/crd/cortex.cloud_pipelines.yaml | 88 +++++++++++++------
.../cinder/pipeline_controller_test.go | 36 ++++----
.../decisions/cinder/supported_steps.go | 4 +-
.../scheduling/decisions/machines/noop.go | 2 +-
.../machines/pipeline_controller_test.go | 24 ++---
.../decisions/machines/supported_steps.go | 4 +-
.../manila/pipeline_controller_test.go | 32 +++----
.../weighers/netapp_cpu_usage_balancing.go | 2 +-
.../decisions/manila/supported_steps.go | 4 +-
.../nova/pipeline_controller_test.go | 60 ++++++-------
.../filters/filter_allowed_projects.go | 2 +-
.../plugins/filters/filter_capabilities.go | 2 +-
.../nova/plugins/filters/filter_correct_az.go | 2 +-
.../filters/filter_external_customer.go | 2 +-
.../filters/filter_has_accelerators.go | 2 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filters/filter_has_requested_traits.go | 2 +-
.../filters/filter_host_instructions.go | 2 +-
.../filters/filter_instance_group_affinity.go | 2 +-
.../filter_instance_group_anti_affinity.go | 2 +-
.../plugins/filters/filter_live_migratable.go | 2 +-
.../filters/filter_live_migratable_test.go | 6 +-
.../plugins/filters/filter_maintenance.go | 2 +-
.../filters/filter_packed_virtqueue.go | 2 +-
.../filters/filter_requested_destination.go | 2 +-
.../filter_requested_destination_test.go | 4 +-
.../filters/filter_status_conditions.go | 2 +-
.../vmware_anti_affinity_noisy_projects.go | 2 +-
.../vmware_avoid_long_term_contended_hosts.go | 2 +-
...vmware_avoid_short_term_contended_hosts.go | 2 +-
.../vmware_general_purpose_balancing.go | 2 +-
.../weighers/vmware_hana_binpacking.go | 2 +-
.../decisions/nova/supported_steps.go | 4 +-
.../pods/pipeline_controller_test.go | 24 ++---
.../plugins/filters/filter_node_affinity.go | 2 +-
.../plugins/filters/filter_node_available.go | 2 +-
.../plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_noop.go | 2 +-
.../pods/plugins/filters/filter_taint.go | 2 +-
.../pods/plugins/weighers/binpack.go | 2 +-
.../decisions/pods/supported_steps.go | 4 +-
.../scheduling/descheduling/nova/monitor.go | 4 +-
.../descheduling/nova/monitor_test.go | 16 ++--
.../scheduling/descheduling/nova/pipeline.go | 2 +-
.../nova/pipeline_controller_test.go | 10 +--
.../descheduling/nova/pipeline_test.go | 12 +--
.../descheduling/nova/plugins/base.go | 2 +-
.../descheduling/nova/plugins/base_test.go | 2 +-
internal/scheduling/descheduling/nova/step.go | 2 +-
internal/scheduling/lib/pipeline.go | 25 +++---
.../lib/pipeline_controller_test.go | 50 +++++------
internal/scheduling/lib/pipeline_test.go | 12 +--
internal/scheduling/lib/step.go | 28 ++----
internal/scheduling/lib/step_monitor.go | 24 ++---
internal/scheduling/lib/step_monitor_test.go | 6 +-
internal/scheduling/lib/step_test.go | 8 +-
internal/scheduling/lib/weigher_validation.go | 6 +-
.../scheduling/lib/weigher_validation_test.go | 6 +-
62 files changed, 431 insertions(+), 449 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 449ec0309..ae1259d96 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -9,25 +9,12 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
-// Step as part of a cortex pipeline.
-type Step interface {
- // Every step must have options so the pipeline can configure it.
- GetOpts() runtime.RawExtension
- // Every step must have a name so the pipeline can identify it.
- GetName() string
- // Every step can have an optional description.
- GetDescription() string
-}
-
-// Filters remove host candidates from an initial set, leaving
-// valid candidates. Filters are run before weighers are applied, as
-// part of a filter-weigher scheduling pipeline.
-type FilterSpec struct {
+type StepSpec struct {
// The name of the scheduler step in the cortex implementation.
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
- // Additional configuration for the extractor that can be used
+ // Additional configuration for the step that can be used
// +kubebuilder:validation:Optional
Opts runtime.RawExtension `json:"opts,omitempty"`
@@ -36,72 +23,13 @@ type FilterSpec struct {
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
- // Filters are not allowed to depend on knowledges, as knowledges can
- // be outdated leading to invalid filtering decisions.
-}
-
-func (f FilterSpec) GetOpts() runtime.RawExtension { return f.Opts }
-func (f FilterSpec) GetName() string { return f.Name }
-func (f FilterSpec) GetDescription() string { return f.Description }
-
-// Weighers assign weights to the remaining host candidates after filtering,
-// making some hosts more preferable than others. Weighers are run
-// after filters are applied, as part of a filter-weigher scheduling pipeline.
-type WeigherSpec struct {
- // The name of the scheduler step in the cortex implementation.
- // Must match to a step implemented by the pipeline controller.
- Name string `json:"name"`
-
- // Additional configuration for the extractor that can be used
- // +kubebuilder:validation:Optional
- Opts runtime.RawExtension `json:"opts,omitempty"`
-
- // Additional description of the step which helps understand its purpose
- // and decisions made by it.
- // +kubebuilder:validation:Optional
- Description string `json:"description,omitempty"`
-
- // Knowledges this step depends on to be ready.
- //
- // Weighers can depend on knowledges as they don't break valid placements,
- // they only make it more optimal.
+ // If required, steps can specify knowledges on which they depend.
+ // Changes to the knowledges' readiness will trigger re-evaluation of
+ // pipelines containing this step.
// +kubebuilder:validation:Optional
Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
-func (w WeigherSpec) GetOpts() runtime.RawExtension { return w.Opts }
-func (w WeigherSpec) GetName() string { return w.Name }
-func (w WeigherSpec) GetDescription() string { return w.Description }
-
-// Detectors find candidates for descheduling (migration off current host).
-// These detectors are run after weighers are applied, as part of a
-// descheduler scheduling pipeline.
-type DetectorSpec struct {
- // The name of the scheduler step in the cortex implementation.
- // Must match to a step implemented by the pipeline controller.
- Name string `json:"name"`
-
- // Additional configuration for the extractor that can be used
- // +kubebuilder:validation:Optional
- Opts runtime.RawExtension `json:"opts,omitempty"`
-
- // Additional description of the step which helps understand its purpose
- // and decisions made by it.
- // +kubebuilder:validation:Optional
- Description string `json:"description,omitempty"`
-
- // Knowledges this step depends on to be ready.
- //
- // Detectors can depend on knowledges as they don't ensure valid placements
- // and therefore are not on the critical path.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
-}
-
-func (d DetectorSpec) GetOpts() runtime.RawExtension { return d.Opts }
-func (d DetectorSpec) GetName() string { return d.Name }
-func (d DetectorSpec) GetDescription() string { return d.Description }
-
type PipelineType string
const (
@@ -144,14 +72,14 @@ type PipelineSpec struct {
// Filters remove host candidates from an initial set, leaving
// valid candidates. Filters are run before weighers are applied.
// +kubebuilder:validation:Optional
- Filters []FilterSpec `json:"filters,omitempty"`
+ Filters []StepSpec `json:"filters,omitempty"`
// Ordered list of weighers to apply in a scheduling pipeline.
//
// This attribute is set only if the pipeline type is filter-weigher.
// These weighers are run after filters are applied.
// +kubebuilder:validation:Optional
- Weighers []WeigherSpec `json:"weighers,omitempty"`
+ Weighers []StepSpec `json:"weighers,omitempty"`
// Ordered list of detectors to apply in a descheduling pipeline.
//
@@ -159,7 +87,7 @@ type PipelineSpec struct {
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied.
// +kubebuilder:validation:Optional
- Detectors []DetectorSpec `json:"detectors,omitempty"`
+ Detectors []StepSpec `json:"detectors,omitempty"`
}
const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 785455a08..c120577d1 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -425,43 +425,6 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
- *out = *in
- in.Opts.DeepCopyInto(&out.Opts)
- if in.Knowledges != nil {
- in, out := &in.Knowledges, &out.Knowledges
- *out = make([]v1.ObjectReference, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec.
-func (in *DetectorSpec) DeepCopy() *DetectorSpec {
- if in == nil {
- return nil
- }
- out := new(DetectorSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
- *out = *in
- in.Opts.DeepCopyInto(&out.Opts)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
-func (in *FilterSpec) DeepCopy() *FilterSpec {
- if in == nil {
- return nil
- }
- out := new(FilterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityDatasource) DeepCopyInto(out *IdentityDatasource) {
*out = *in
@@ -879,21 +842,21 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
- *out = make([]FilterSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Weighers != nil {
in, out := &in.Weighers, &out.Weighers
- *out = make([]WeigherSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Detectors != nil {
in, out := &in.Detectors, &out.Detectors
- *out = make([]DetectorSpec, len(*in))
+ *out = make([]StepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1135,7 +1098,7 @@ func (in *StepResult) DeepCopy() *StepResult {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
+func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
in.Opts.DeepCopyInto(&out.Opts)
if in.Knowledges != nil {
@@ -1145,12 +1108,12 @@ func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherSpec.
-func (in *WeigherSpec) DeepCopy() *WeigherSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
+func (in *StepSpec) DeepCopy() *StepSpec {
if in == nil {
return nil
}
- out := new(WeigherSpec)
+ out := new(StepSpec)
in.DeepCopyInto(out)
return out
}
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 794acf0b1..f8a096cb1 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -73,10 +73,6 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -85,10 +81,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -140,8 +135,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -156,24 +151,70 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -205,10 +246,6 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -217,10 +254,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -272,8 +308,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 794acf0b1..f8a096cb1 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -73,10 +73,6 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -85,10 +81,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -140,8 +135,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -156,24 +151,70 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -205,10 +246,6 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -217,10 +254,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -272,8 +308,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 679bab0fb..d8a6098d8 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -79,10 +79,6 @@ spec:
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
- description: |-
- Detectors find candidates for descheduling (migration off current host).
- These detectors are run after weighers are applied, as part of a
- descheduler scheduling pipeline.
properties:
description:
description: |-
@@ -91,10 +87,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Detectors can depend on knowledges as they don't ensure valid placements
- and therefore are not on the critical path.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -146,8 +141,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -162,24 +157,70 @@ spec:
Filters remove host candidates from an initial set, leaving
valid candidates. Filters are run before weighers are applied.
items:
- description: |-
- Filters remove host candidates from an initial set, leaving
- valid candidates. Filters are run before weighers are applied, as
- part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ knowledges:
+ description: |-
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
+ items:
+ description: ObjectReference contains enough information to
+ let you inspect or modify the referred object.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -211,10 +252,6 @@ spec:
This attribute is set only if the pipeline type is filter-weigher.
These weighers are run after filters are applied.
items:
- description: |-
- Weighers assign weights to the remaining host candidates after filtering,
- making some hosts more preferable than others. Weighers are run
- after filters are applied, as part of a filter-weigher scheduling pipeline.
properties:
description:
description: |-
@@ -223,10 +260,9 @@ spec:
type: string
knowledges:
description: |-
- Knowledges this step depends on to be ready.
-
- Weighers can depend on knowledges as they don't break valid placements,
- they only make it more optimal.
+ If required, steps can specify knowledges on which they depend.
+ Changes to the knowledges' readiness will trigger re-evaluation of
+ pipelines containing this step.
items:
description: ObjectReference contains enough information to
let you inspect or modify the referred object.
@@ -278,8 +314,8 @@ spec:
Must match to a step implemented by the pipeline controller.
type: string
opts:
- description: Additional configuration for the extractor that
- can be used
+ description: Additional configuration for the step that can
+ be used
type: object
x-kubernetes-preserve-unknown-fields: true
required:
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 2b7cda1bf..7121f5166 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -175,8 +175,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
})
if err != nil {
@@ -284,8 +284,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -318,8 +318,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -372,8 +372,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -476,24 +476,24 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "test-plugin",
},
},
- weighers: []v1alpha1.WeigherSpec{
+ weighers: []v1alpha1.StepSpec{
{
Name: "test-plugin",
},
diff --git a/internal/scheduling/decisions/cinder/supported_steps.go b/internal/scheduling/decisions/cinder/supported_steps.go
index 90e5dc95d..9903bdb2f 100644
--- a/internal/scheduling/decisions/cinder/supported_steps.go
+++ b/internal/scheduling/decisions/cinder/supported_steps.go
@@ -8,12 +8,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type CinderWeigher = lib.Weigher[api.ExternalSchedulerRequest]
+type CinderWeigher = lib.Step[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the cinder scheduling.
var supportedWeighers = map[string]func() CinderWeigher{}
-type CinderFilter = lib.Filter[api.ExternalSchedulerRequest]
+type CinderFilter = lib.Step[api.ExternalSchedulerRequest]
// Configuration of filters supported by the cinder scheduling.
var supportedFilters = map[string]func() CinderFilter{}
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index 6dfa7911b..3b0104aa6 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -15,7 +15,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 157b8ac13..c218486bb 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -211,26 +211,26 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{Name: "noop"},
},
expectError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{Name: "unsupported"},
},
expectError: true,
@@ -315,8 +315,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -349,8 +349,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -396,8 +396,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/machines/supported_steps.go b/internal/scheduling/decisions/machines/supported_steps.go
index 4e04d64d1..730c92ded 100644
--- a/internal/scheduling/decisions/machines/supported_steps.go
+++ b/internal/scheduling/decisions/machines/supported_steps.go
@@ -8,12 +8,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type MachineWeigher = lib.Weigher[ironcore.MachinePipelineRequest]
+type MachineWeigher = lib.Step[ironcore.MachinePipelineRequest]
// Configuration of weighers supported by the machine scheduling.
var supportedWeighers = map[string]func() MachineWeigher{}
-type MachineFilter = lib.Filter[ironcore.MachinePipelineRequest]
+type MachineFilter = lib.Step[ironcore.MachinePipelineRequest]
// Configuration of filters supported by the machine scheduling.
var supportedFilters = map[string]func() MachineFilter{
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index d280f0e05..ea8e20c14 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -279,8 +279,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -313,8 +313,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -367,8 +367,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -471,19 +471,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "supported netapp step",
- weighers: []v1alpha1.WeigherSpec{
+ weighers: []v1alpha1.StepSpec{
{
Name: "netapp_cpu_usage_balancing",
Opts: runtime.RawExtension{
@@ -495,7 +495,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported-plugin",
},
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index 97a8d0d86..944e38d5a 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -45,7 +45,7 @@ func (o NetappCPUUsageBalancingStepOpts) Validate() error {
// Step to balance CPU usage by avoiding highly used storage pools.
type NetappCPUUsageBalancingStep struct {
// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/manila/supported_steps.go b/internal/scheduling/decisions/manila/supported_steps.go
index fca819711..aee4af194 100644
--- a/internal/scheduling/decisions/manila/supported_steps.go
+++ b/internal/scheduling/decisions/manila/supported_steps.go
@@ -9,12 +9,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type ManilaFilter = lib.Filter[api.ExternalSchedulerRequest]
+type ManilaFilter = lib.Step[api.ExternalSchedulerRequest]
// Configuration of filters supported by the manila scheduler.
var supportedFilters = map[string]func() ManilaFilter{}
-type ManilaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
+type ManilaWeigher = lib.Step[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the manila scheduler.
var supportedWeighers = map[string]func() ManilaWeigher{
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index b02face53..69ead2c8c 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -92,8 +92,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: false,
@@ -121,8 +121,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -173,8 +173,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
expectError: true,
@@ -264,19 +264,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "supported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
},
@@ -285,7 +285,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported-plugin",
},
@@ -294,7 +294,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with scoping options",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -306,7 +306,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with invalid scoping options",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
Opts: runtime.RawExtension{
@@ -418,8 +418,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -430,8 +430,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -466,8 +466,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -478,8 +478,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -538,8 +538,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -550,8 +550,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -588,8 +588,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
@@ -626,8 +626,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
setupPipelineConfigs: true,
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index 96815c618..3e08273a0 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -14,7 +14,7 @@ import (
)
type FilterAllowedProjectsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Lock certain hosts for certain projects, based on the hypervisor spec.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index 80dfa5b3c..31d10fd27 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -15,7 +15,7 @@ import (
)
type FilterCapabilitiesStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Get the provided capabilities of a hypervisor resource in the format Nova expects.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index dfcdc9f4b..5bfaab618 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -13,7 +13,7 @@ import (
)
type FilterCorrectAZStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Only get hosts in the requested az.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index b995be916..cc34afd07 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -28,7 +28,7 @@ func (opts FilterExternalCustomerStepOpts) Validate() error {
}
type FilterExternalCustomerStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
}
// Prefix-match the domain name for external customer domains and filter out hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 0a5b1339f..8320168a2 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -14,7 +14,7 @@ import (
)
type FilterHasAcceleratorsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with accelerators.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index 4b07ef56c..b80173473 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -23,7 +23,7 @@ type FilterHasEnoughCapacityOpts struct {
func (FilterHasEnoughCapacityOpts) Validate() error { return nil }
type FilterHasEnoughCapacity struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
}
// Filter hosts that don't have enough capacity to run the requested flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index 35367dff3..ea4f81379 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -15,7 +15,7 @@ import (
)
type FilterHasRequestedTraits struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts that do not have the requested traits given by the extra spec:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index cd57e2e4d..66a9fccf3 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -12,7 +12,7 @@ import (
)
type FilterHostInstructionsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts based on instructions given in the request spec. Supported are:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index fb42e7c19..1b549aac4 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -12,7 +12,7 @@ import (
)
type FilterInstanceGroupAffinityStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts in spec.instance_group.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index e9390d9c3..00243d6cb 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -14,7 +14,7 @@ import (
)
type FilterInstanceGroupAntiAffinityStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts not in spec_obj.instance_group but only until
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index f31e72516..1076898b1 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -15,7 +15,7 @@ import (
)
type FilterLiveMigratableStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check if the encountered request spec is a live migration.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index edb33c5c0..c719a3eb6 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -727,7 +727,7 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -812,7 +812,7 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -856,7 +856,7 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index a8d386c4d..867317496 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -13,7 +13,7 @@ import (
)
type FilterMaintenanceStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index bb443ef57..f0066218b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -14,7 +14,7 @@ import (
)
type FilterPackedVirtqueueStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with packed virtqueues.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index c9f0319fb..88285edd1 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -14,7 +14,7 @@ import (
)
type FilterRequestedDestinationStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If `requested_destination` is set in the request spec, filter hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index 3ba008214..ca1faaa07 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -494,7 +494,7 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
@@ -575,7 +575,7 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
Client: fakeClient,
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index 870aaa58b..a0b96e0ed 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -15,7 +15,7 @@ import (
)
type FilterStatusConditionsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that all status conditions meet the expected values, for example,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 3d04ce8e1..5dcb35c12 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -36,7 +36,7 @@ func (o VMwareAntiAffinityNoisyProjectsStepOpts) Validate() error {
// Step to avoid noisy projects by downvoting the hosts they are running on.
type VMwareAntiAffinityNoisyProjectsStep struct {
-// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
+// BaseStep is a helper struct that provides common functionality for all steps.
+ lib.BaseStep[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
}
// Downvote the hosts a project is currently running on if it's noisy.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index 14396e165..9908c4ca3 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -45,7 +45,7 @@ func (o VMwareAvoidLongTermContendedHostsStepOpts) Validate() error {
// Step to avoid long term contended hosts by downvoting them.
type VMwareAvoidLongTermContendedHostsStep struct {
-// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
+// BaseStep is a helper struct that provides common functionality for all steps.
+ lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index 34f3514e9..7b6cba041 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -45,7 +45,7 @@ func (o VMwareAvoidShortTermContendedHostsStepOpts) Validate() error {
// Step to avoid recently contended hosts by downvoting them.
type VMwareAvoidShortTermContendedHostsStep struct {
-// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
+// BaseStep is a helper struct that provides common functionality for all steps.
+ lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
}
// Downvote hosts that are highly contended.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index e3b54d972..95a76f4ad 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -35,7 +35,7 @@ func (o VMwareGeneralPurposeBalancingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareGeneralPurposeBalancingStep struct {
-// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
+// BaseStep is a helper struct that provides common functionality for all steps.
+ lib.BaseStep[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
}
// Pack VMs on hosts based on their flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index 28915a03a..008c04266 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -35,7 +35,7 @@ func (o VMwareHanaBinpackingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareHanaBinpackingStep struct {
-// Weigher is a helper struct that provides common functionality for all steps.
- lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
+// BaseStep is a helper struct that provides common functionality for all steps.
+ lib.BaseStep[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
}
// Pack VMs on hosts based on their flavor.
diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_steps.go
index 4821e7ba5..57957589d 100644
--- a/internal/scheduling/decisions/nova/supported_steps.go
+++ b/internal/scheduling/decisions/nova/supported_steps.go
@@ -10,7 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type NovaFilter = lib.Filter[api.ExternalSchedulerRequest]
+type NovaFilter = lib.Step[api.ExternalSchedulerRequest]
// Configuration of filters supported by the nova scheduler.
var supportedFilters = map[string]func() NovaFilter{
@@ -31,7 +31,7 @@ var supportedFilters = map[string]func() NovaFilter{
"filter_requested_destination": func() NovaFilter { return &filters.FilterRequestedDestinationStep{} },
}
-type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
+type NovaWeigher = lib.Step[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the nova scheduler.
var supportedWeighers = map[string]func() NovaWeigher{
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 4d93a1720..4e6a6f249 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -186,19 +186,19 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.FilterSpec
- weighers []v1alpha1.WeigherSpec
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
expectError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.FilterSpec{},
- weighers: []v1alpha1.WeigherSpec{},
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
expectError: false,
},
{
name: "noop step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "noop",
},
@@ -207,7 +207,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.FilterSpec{
+ filters: []v1alpha1.StepSpec{
{
Name: "unsupported",
},
@@ -292,8 +292,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
@@ -326,8 +326,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: false,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: false,
@@ -373,8 +373,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index acacc6ea6..265bffa24 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -19,7 +19,7 @@ type NodeAffinityFilter struct {
Alias string
}
-func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index c668e5f0a..45ae98067 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -18,7 +18,7 @@ type NodeAvailableFilter struct {
Alias string
}
-func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 70e897b6a..44d185580 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -19,7 +19,7 @@ type NodeCapacityFilter struct {
Alias string
}
-func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 08fbf1cd4..3cd328a50 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -18,7 +18,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 697c41466..82135b161 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -18,7 +18,7 @@ type TaintFilter struct {
Alias string
}
-func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index 65ca207ab..f5572ae96 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -28,7 +28,7 @@ func (o BinpackingStepOpts) Validate() error {
}
type BinpackingStep struct {
- lib.BaseWeigher[api.PodPipelineRequest, BinpackingStepOpts]
+ lib.BaseStep[api.PodPipelineRequest, BinpackingStepOpts]
}
func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.StepResult, error) {
diff --git a/internal/scheduling/decisions/pods/supported_steps.go b/internal/scheduling/decisions/pods/supported_steps.go
index 43c8f1ac2..57e2d8151 100644
--- a/internal/scheduling/decisions/pods/supported_steps.go
+++ b/internal/scheduling/decisions/pods/supported_steps.go
@@ -10,7 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type PodFilter = lib.Filter[pods.PodPipelineRequest]
+type PodFilter = lib.Step[pods.PodPipelineRequest]
// Configuration of filters supported by the pods scheduler.
var supportedFilters = map[string]func() PodFilter{
@@ -20,7 +20,7 @@ var supportedFilters = map[string]func() PodFilter{
"nodecapacity": func() PodFilter { return &filters.NodeCapacityFilter{} },
}
-type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
+type PodWeigher = lib.Step[pods.PodPipelineRequest]
// Configuration of weighers supported by the pods scheduler.
var supportedWeighers = map[string]func() PodWeigher{
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index 6fd248321..ea7a48163 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -83,7 +83,7 @@ type StepMonitor struct {
}
// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
+func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -101,7 +101,7 @@ func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMon
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return m.step.Init(ctx, client, step)
}
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index ed7416848..1f8e658de 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -80,7 +80,7 @@ type mockMonitorStep struct {
runCalled bool
}
-func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
m.initCalled = true
return m.initError
}
@@ -97,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.DetectorSpec{Name: "test-step"}
+ conf := v1alpha1.StepSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index 08c26213d..d1c3445cf 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -33,7 +33,7 @@ type Pipeline struct {
func (p *Pipeline) Init(
ctx context.Context,
- confedSteps []v1alpha1.DetectorSpec,
+ confedSteps []v1alpha1.StepSpec,
supportedSteps map[string]Step,
) error {
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 86a254d93..57b8231a5 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -33,20 +33,20 @@ type mockControllerStep struct{}
func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
return nil, nil
}
-func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.DetectorSpec
+ steps []v1alpha1.StepSpec
expectError bool
expectedError string
}{
{
name: "successful pipeline initialization",
- steps: []v1alpha1.DetectorSpec{
+ steps: []v1alpha1.StepSpec{
{
Name: "mock-step",
},
@@ -55,7 +55,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- steps: []v1alpha1.DetectorSpec{
+ steps: []v1alpha1.StepSpec{
{
Name: "unsupported",
},
@@ -65,7 +65,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "empty steps",
- steps: []v1alpha1.DetectorSpec{},
+ steps: []v1alpha1.StepSpec{},
expectError: false,
},
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index d006f2b8f..4ed4f327e 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -30,7 +30,7 @@ func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
return m.decisions, nil
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
if m.initError != nil {
return m.initError
}
@@ -42,7 +42,7 @@ func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
supportedSteps map[string]Step
- confedSteps []v1alpha1.DetectorSpec
+ confedSteps []v1alpha1.StepSpec
expectedSteps int
expectedError bool
}{
@@ -51,7 +51,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "test-step",
}},
expectedSteps: 1,
@@ -61,7 +61,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "unsupported-step",
}},
expectedError: true,
@@ -71,7 +71,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
- confedSteps: []v1alpha1.DetectorSpec{{
+ confedSteps: []v1alpha1.StepSpec{{
Name: "failing-step",
}},
expectedError: true,
@@ -82,7 +82,7 @@ func TestPipeline_Init(t *testing.T) {
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.DetectorSpec{
+ confedSteps: []v1alpha1.StepSpec{
{
Name: "step1",
},
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index c1b6ea902..7c024b71f 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -21,7 +21,7 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index a0f581c0a..524c69547 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -23,7 +23,7 @@ func (o MockOptions) Validate() error {
func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
- err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
+ err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
Opts: runtime.RawExtension{Raw: []byte(`{
"option1": "value1",
"option2": 2
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/step.go
index 552edf87b..7c53bc991 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/step.go
@@ -21,5 +21,5 @@ type Step interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 82f3ef528..68f4401dd 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -30,31 +30,31 @@ type pipeline[RequestType PipelineRequest] struct {
// The order in which filters are applied, by their step name.
filtersOrder []string
// The filters by their name.
- filters map[string]Filter[RequestType]
+ filters map[string]Step[RequestType]
// The order in which weighers are applied, by their step name.
weighersOrder []string
// The weighers by their name.
- weighers map[string]Weigher[RequestType]
+ weighers map[string]Step[RequestType]
// Monitor to observe the pipeline.
monitor PipelineMonitor
}
-type StepWrapper[RequestType PipelineRequest, StepType v1alpha1.Step] func(
+type StepWrapper[RequestType PipelineRequest] func(
ctx context.Context,
client client.Client,
- step StepType,
- impl Step[RequestType, StepType],
-) (Step[RequestType, StepType], error)
+ step v1alpha1.StepSpec,
+ impl Step[RequestType],
+) (Step[RequestType], error)
// Create a new pipeline with filters and weighers contained in the configuration.
func NewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
- supportedFilters map[string]func() Filter[RequestType],
- confedFilters []v1alpha1.FilterSpec,
- supportedWeighers map[string]func() Weigher[RequestType],
- confedWeighers []v1alpha1.WeigherSpec,
+ supportedFilters map[string]func() Step[RequestType],
+ confedFilters []v1alpha1.StepSpec,
+ supportedWeighers map[string]func() Step[RequestType],
+ confedWeighers []v1alpha1.StepSpec,
monitor PipelineMonitor,
) (Pipeline[RequestType], error) {
@@ -68,7 +68,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all filters from the configuration.
- filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
+ filtersByName := make(map[string]Step[RequestType], len(confedFilters))
filtersOrder := []string{}
for _, filterConfig := range confedFilters {
slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
@@ -88,7 +88,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all weighers from the configuration.
- weighersByName := make(map[string]Weigher[RequestType], len(confedWeighers))
+ weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
weighersOrder := []string{}
for _, weigherConfig := range confedWeighers {
slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
@@ -98,6 +98,7 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
return nil, errors.New("unsupported weigher name: " + weigherConfig.Name)
}
weigher := makeWeigher()
+ // Validate that the weigher doesn't unexpectedly filter out hosts.
weigher = validateWeigher(weigher)
weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
if err := weigher.Init(ctx, client, weigherConfig); err != nil {
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index 7a85999b6..efb2a1ba0 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -72,8 +72,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
},
@@ -92,8 +92,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -103,8 +103,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -114,8 +114,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeDescheduler,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
{
@@ -125,8 +125,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
},
@@ -202,12 +202,12 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{
+ Filters: []v1alpha1.StepSpec{
{
Name: "test-filter",
},
},
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
},
@@ -241,7 +241,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -265,7 +265,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -283,7 +283,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -361,8 +361,8 @@ func TestBasePipelineController_HandlePipelineCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
}
@@ -406,8 +406,8 @@ func TestBasePipelineController_HandlePipelineUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.FilterSpec{},
- Weighers: []v1alpha1.WeigherSpec{},
+ Filters: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.StepSpec{},
},
}
@@ -667,7 +667,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -684,7 +684,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -717,7 +717,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -794,7 +794,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -945,7 +945,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
@@ -1013,7 +1013,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.WeigherSpec{
+ Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
Knowledges: []corev1.ObjectReference{
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index dcb1f4e02..f32c5377b 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -18,7 +18,7 @@ type mockFilter struct {
name string
}
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
@@ -36,7 +36,7 @@ type mockWeigher struct {
name string
}
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return nil
}
@@ -52,13 +52,13 @@ func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &pipeline[mockPipelineRequest]{
- filters: map[string]Filter[mockPipelineRequest]{
+ filters: map[string]Step[mockPipelineRequest]{
"mock_filter": &mockFilter{
name: "mock_filter",
},
},
filtersOrder: []string{"mock_filter"},
- weighers: map[string]Weigher[mockPipelineRequest]{
+ weighers: map[string]Step[mockPipelineRequest]{
"mock_weigher": &mockWeigher{
name: "mock_weigher",
},
@@ -136,7 +136,7 @@ func TestPipeline_NormalizeNovaWeights(t *testing.T) {
func TestPipeline_ApplyStepWeights(t *testing.T) {
p := &pipeline[mockPipelineRequest]{
- weighers: map[string]Weigher[mockPipelineRequest]{},
+ weighers: map[string]Step[mockPipelineRequest]{},
weighersOrder: []string{"step1", "step2"},
}
@@ -214,7 +214,7 @@ func TestPipeline_RunFilters(t *testing.T) {
filtersOrder: []string{
"mock_filter",
},
- filters: map[string]Filter[mockPipelineRequest]{
+ filters: map[string]Step[mockPipelineRequest]{
"mock_filter": mockStep,
},
}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index caac1ebf7..59fcf2976 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -30,9 +30,9 @@ type EmptyStepOpts struct{}
func (EmptyStepOpts) Validate() error { return nil }
// Interface for a scheduler step.
-type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
+type Step[RequestType PipelineRequest] interface {
// Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step StepType) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
// Run this step of the scheduling pipeline.
//
@@ -52,15 +52,9 @@ type Step[RequestType PipelineRequest, StepType v1alpha1.Step] interface {
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-// Step that acts as a weigher in the scheduling pipeline.
-type Weigher[RequestType PipelineRequest] = Step[RequestType, v1alpha1.WeigherSpec]
-
-// Step that acts as a filter in the scheduling pipeline.
-type Filter[RequestType PipelineRequest] = Step[RequestType, v1alpha1.FilterSpec]
-
// Common base for all steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step] struct {
+type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The activation function to use.
@@ -69,17 +63,9 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts, StepType v1alpha1.Step
Client client.Client
}
-// Common base implementation of a weigher step.
-// Functionally identical to BaseStep, but used for clarity.
-type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.WeigherSpec]
-
-// Common base implementation of a filter step.
-// Functionally identical to BaseStep, but used for clarity.
-type BaseFilter[RequestType PipelineRequest, Opts StepOpts] = BaseStep[RequestType, Opts, v1alpha1.FilterSpec]
-
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
- opts := conf.NewRawOptsBytes(step.GetOpts().Raw)
+func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ opts := conf.NewRawOptsBytes(step.Opts.Raw)
if err := s.Load(opts); err != nil {
return err
}
@@ -93,7 +79,7 @@ func (s *BaseStep[RequestType, Opts, StepType]) Init(ctx context.Context, client
// Get a default result (no action) for the input weight keys given in the request.
// Use this to initialize the result before applying filtering/weighing logic.
-func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
+func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
activations := make(map[string]float64)
for _, subject := range request.GetSubjects() {
activations[subject] = s.NoEffect()
@@ -103,7 +89,7 @@ func (s *BaseStep[RequestType, Opts, StepType]) IncludeAllHostsFromRequest(reque
}
// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts, StepType]) PrepareStats(request RequestType, unit string) StepStatistics {
+func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
return StepStatistics{
Unit: unit,
Subjects: make(map[string]float64, len(request.GetSubjects())),
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 42601a0bb..2e361c1b3 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -20,7 +20,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
+type StepMonitor[RequestType PipelineRequest] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -30,7 +30,7 @@ type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
stepName string
// The wrapped scheduler step to monitor.
- Step Step[RequestType, StepType]
+ Step Step[RequestType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -44,32 +44,32 @@ type StepMonitor[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
}
// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
+func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return s.Step.Init(ctx, client, step)
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
+func monitorStep[RequestType PipelineRequest](
_ context.Context,
_ client.Client,
- step StepType,
- impl Step[RequestType, StepType],
+ step v1alpha1.StepSpec,
+ impl Step[RequestType],
m PipelineMonitor,
-) *StepMonitor[RequestType, StepType] {
+) *StepMonitor[RequestType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, step.GetName())
+ WithLabelValues(m.PipelineName, step.Name)
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, step.GetName())
+ WithLabelValues(m.PipelineName, step.Name)
}
- return &StepMonitor[RequestType, StepType]{
+ return &StepMonitor[RequestType]{
Step: impl,
- stepName: step.GetName(),
+ stepName: step.Name,
pipelineName: m.PipelineName,
runTimer: runTimer,
stepSubjectWeight: m.stepSubjectWeight,
@@ -80,7 +80,7 @@ func monitorStep[RequestType PipelineRequest, StepType v1alpha1.Step](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index fea4e04e7..c248ec576 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -7,8 +7,6 @@ import (
"log/slog"
"os"
"testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
type mockObserver struct {
@@ -23,9 +21,9 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ monitor := &StepMonitor[mockPipelineRequest]{
stepName: "mock_step",
- Step: &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ Step: &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index a1940355f..31d335cd3 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -11,15 +11,15 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest, StepType v1alpha1.Step] struct {
- InitFunc func(ctx context.Context, client client.Client, step StepType) error
+type mockStep[RequestType PipelineRequest] struct {
+ InitFunc func(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType, StepType]) Init(ctx context.Context, client client.Client, step StepType) error {
+func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType, StepType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index a86e19ec5..629ba7b6b 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -15,17 +15,17 @@ import (
// Wrapper for scheduler steps that validates them before/after execution.
type WeigherValidator[RequestType PipelineRequest] struct {
// The wrapped weigher to validate.
- Weigher Weigher[RequestType]
+ Weigher Step[RequestType]
}
// Initialize the wrapped weigher with the database and options.
-func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
slog.Info("scheduler: init validation for step", "name", step.Name)
return s.Weigher.Init(ctx, client, step)
}
// Validate the wrapped weigher with the database and options.
-func validateWeigher[RequestType PipelineRequest](weigher Weigher[RequestType]) *WeigherValidator[RequestType] {
+func validateWeigher[RequestType PipelineRequest](weigher Step[RequestType]) *WeigherValidator[RequestType] {
return &WeigherValidator[RequestType]{Weigher: weigher}
}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index b97796c3f..aa6cba851 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -7,12 +7,10 @@ import (
"log/slog"
"reflect"
"testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ mockStep := &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
@@ -47,7 +45,7 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest, v1alpha1.WeigherSpec]{
+ mockStep := &mockStep[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
From 8b0fab0722fba5c65b8de2134de2e1dd42d9c703 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Fri, 23 Jan 2026 11:43:50 +0100
Subject: [PATCH 12/41] Use critical and non-critical errors during pipeline
initialization
---
api/v1alpha1/pipeline_types.go | 13 ++--
config/crd/bases/cortex.cloud_pipelines.yaml | 20 +-----
config/crd/cortex.cloud_pipelines.yaml | 20 +-----
.../templates/crd/cortex.cloud_pipelines.yaml | 20 +-----
.../decisions/cinder/pipeline_controller.go | 4 +-
.../cinder/pipeline_controller_test.go | 58 +++++++--------
.../decisions/machines/pipeline_controller.go | 4 +-
.../machines/pipeline_controller_test.go | 45 ++++++------
.../decisions/manila/pipeline_controller.go | 4 +-
.../manila/pipeline_controller_test.go | 52 ++++++++------
.../decisions/nova/pipeline_controller.go | 4 +-
.../nova/pipeline_controller_test.go | 61 +++++++++-------
.../decisions/pods/pipeline_controller.go | 4 +-
.../pods/pipeline_controller_test.go | 44 +++++++-----
.../scheduling/descheduling/nova/pipeline.go | 10 +--
.../descheduling/nova/pipeline_controller.go | 14 +++-
.../nova/pipeline_controller_test.go | 47 +++++++------
.../descheduling/nova/pipeline_test.go | 41 ++++++-----
internal/scheduling/lib/pipeline.go | 40 +++++++----
.../scheduling/lib/pipeline_controller.go | 70 ++++++++++++-------
.../lib/pipeline_controller_test.go | 13 ++--
21 files changed, 312 insertions(+), 276 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index ae1259d96..da5ccc4c4 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -93,16 +93,11 @@ type PipelineSpec struct {
const (
// The pipeline is ready to be used.
PipelineConditionReady = "Ready"
+ // All steps in the pipeline are ready.
+ PipelineConditionAllStepsReady = "AllStepsReady"
)
type PipelineStatus struct {
- // The total number of steps configured in the pipeline.
- TotalSteps int `json:"totalSteps"`
- // The number of steps that are ready.
- ReadySteps int `json:"readySteps"`
- // An overview of the readiness of the steps in the pipeline.
- // Format: "ReadySteps / TotalSteps steps ready".
- StepsReadyFrac string `json:"stepsReadyFrac,omitempty"`
// The current status conditions of the pipeline.
// +kubebuilder:validation:Optional
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
@@ -114,8 +109,8 @@ type PipelineStatus struct {
// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.schedulingDomain"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
-// +kubebuilder:printcolumn:name="Steps",type="string",JSONPath=".status.stepsReadyFrac"
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="All Steps Ready",type="string",JSONPath=".status.conditions[?(@.type=='AllStepsReady')].status"
+// +kubebuilder:printcolumn:name="Pipeline Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// Pipeline is the Schema for the decisions API
type Pipeline struct {
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index f8a096cb1..18f8d4b97 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -24,11 +24,11 @@ spec:
- jsonPath: .spec.type
name: Type
type: string
- - jsonPath: .status.stepsReadyFrac
- name: Steps
+ - jsonPath: .status.conditions[?(@.type=='AllStepsReady')].status
+ name: All Steps Ready
type: string
- jsonPath: .status.conditions[?(@.type=='Ready')].status
- name: Ready
+ name: Pipeline Ready
type: string
name: v1alpha1
schema:
@@ -380,20 +380,6 @@ spec:
- type
type: object
type: array
- readySteps:
- description: The number of steps that are ready.
- type: integer
- stepsReadyFrac:
- description: |-
- An overview of the readiness of the steps in the pipeline.
- Format: "ReadySteps / TotalSteps steps ready".
- type: string
- totalSteps:
- description: The total number of steps configured in the pipeline.
- type: integer
- required:
- - readySteps
- - totalSteps
type: object
required:
- spec
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index f8a096cb1..18f8d4b97 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -24,11 +24,11 @@ spec:
- jsonPath: .spec.type
name: Type
type: string
- - jsonPath: .status.stepsReadyFrac
- name: Steps
+ - jsonPath: .status.conditions[?(@.type=='AllStepsReady')].status
+ name: All Steps Ready
type: string
- jsonPath: .status.conditions[?(@.type=='Ready')].status
- name: Ready
+ name: Pipeline Ready
type: string
name: v1alpha1
schema:
@@ -380,20 +380,6 @@ spec:
- type
type: object
type: array
- readySteps:
- description: The number of steps that are ready.
- type: integer
- stepsReadyFrac:
- description: |-
- An overview of the readiness of the steps in the pipeline.
- Format: "ReadySteps / TotalSteps steps ready".
- type: string
- totalSteps:
- description: The total number of steps configured in the pipeline.
- type: integer
- required:
- - readySteps
- - totalSteps
type: object
required:
- spec
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index d8a6098d8..e59f3bff0 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -30,11 +30,11 @@ spec:
- jsonPath: .spec.type
name: Type
type: string
- - jsonPath: .status.stepsReadyFrac
- name: Steps
+ - jsonPath: .status.conditions[?(@.type=='AllStepsReady')].status
+ name: All Steps Ready
type: string
- jsonPath: .status.conditions[?(@.type=='Ready')].status
- name: Ready
+ name: Pipeline Ready
type: string
name: v1alpha1
schema:
@@ -386,20 +386,6 @@ spec:
- type
type: object
type: array
- readySteps:
- description: The number of steps that are ready.
- type: integer
- stepsReadyFrac:
- description: |-
- An overview of the readiness of the steps in the pipeline.
- Format: "ReadySteps / TotalSteps steps ready".
- type: string
- totalSteps:
- description: The total number of steps configured in the pipeline.
- type: integer
- required:
- - readySteps
- - totalSteps
type: object
required:
- spec
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller.go b/internal/scheduling/decisions/cinder/pipeline_controller.go
index eb3debc65..279cb3a68 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller.go
@@ -144,9 +144,9 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
+) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
- return lib.NewFilterWeigherPipeline(
+ return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
supportedFilters, p.Spec.Filters,
supportedWeighers, p.Spec.Weighers,
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 7121f5166..7dd8c7081 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -168,7 +168,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -179,10 +179,10 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Weighers: []v1alpha1.StepSpec{},
},
})
- if err != nil {
- t.Fatalf("Failed to init pipeline: %v", err)
+ if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ t.Fatalf("Failed to init pipeline: %v", initResult)
}
- controller.Pipelines[tt.pipeline.Name] = pipeline
+ controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
req := ctrl.Request{
@@ -410,11 +410,11 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
if tt.pipelineConfig != nil {
controller.PipelineConfigs[tt.pipelineConfig.Name] = *tt.pipelineConfig
- pipeline, err := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
- if err != nil {
- t.Fatalf("Failed to init pipeline: %v", err)
+ initResult := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
+ if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ t.Fatalf("Failed to init pipeline: %v", initResult)
}
- controller.Pipelines[tt.pipelineConfig.Name] = pipeline
+ controller.Pipelines[tt.pipelineConfig.Name] = initResult.Pipeline
}
err := controller.ProcessNewDecisionFromAPI(context.Background(), tt.decision)
@@ -475,16 +475,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
tests := []struct {
- name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
- expectError bool
+ name string
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
- name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
@@ -493,18 +495,14 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "test-plugin",
},
},
- weighers: []v1alpha1.StepSpec{
- {
- Name: "test-plugin",
- },
- },
- expectError: true, // Expected because test-plugin is not in supportedSteps
+ expectNonCriticalError: false,
+ expectCriticalError: true, // Expected because test-plugin is not in supportedSteps
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -516,14 +514,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectError && err == nil {
+ if tt.expectCriticalError && initResult.CriticalErr == nil {
t.Error("Expected error but got none")
}
- if !tt.expectError && err != nil {
- t.Errorf("Expected no error but got: %v", err)
+ if !tt.expectCriticalError && initResult.CriticalErr != nil {
+ t.Errorf("Expected no error but got: %v", initResult.CriticalErr)
+ }
+
+ if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ t.Error("Expected non-critical error but got none")
}
- if !tt.expectError && pipeline == nil {
- t.Error("Expected pipeline but got nil")
+ if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
}
})
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller.go b/internal/scheduling/decisions/machines/pipeline_controller.go
index 90cffbf01..5116ec097 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller.go
@@ -186,9 +186,9 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[ironcore.MachinePipelineRequest], error) {
+) lib.PipelineInitResult[lib.Pipeline[ironcore.MachinePipelineRequest]] {
- return lib.NewFilterWeigherPipeline(
+ return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
supportedFilters, p.Spec.Filters,
supportedWeighers, p.Spec.Weighers,
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index c218486bb..27aeb95ff 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -210,36 +210,40 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
tests := []struct {
- name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
- expectError bool
+ name string
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
- name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "noop step",
filters: []v1alpha1.StepSpec{
{Name: "noop"},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
filters: []v1alpha1.StepSpec{
{Name: "unsupported"},
},
- expectError: true,
+ expectNonCriticalError: false,
+ expectCriticalError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -251,18 +255,17 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectError && err == nil {
- t.Error("expected error but got none")
- return
+ if tt.expectCriticalError && initResult.CriticalErr == nil {
+ t.Error("Expected critical error but got none")
}
-
- if !tt.expectError && err != nil {
- t.Errorf("expected no error, got: %v", err)
- return
+ if !tt.expectCriticalError && initResult.CriticalErr != nil {
+ t.Errorf("Expected no critical error but got: %v", initResult.CriticalErr)
}
-
- if !tt.expectError && pipeline == nil {
- t.Error("expected pipeline to be non-nil")
+ if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ t.Error("Expected non-critical error but got none")
+ }
+ if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
}
})
}
diff --git a/internal/scheduling/decisions/manila/pipeline_controller.go b/internal/scheduling/decisions/manila/pipeline_controller.go
index cba5974ea..9f566e8f1 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller.go
@@ -144,9 +144,9 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
+) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
- return lib.NewFilterWeigherPipeline(
+ return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
supportedFilters, p.Spec.Filters,
supportedWeighers, p.Spec.Weighers,
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index ea8e20c14..eba1e2dd0 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -168,7 +168,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
@@ -177,7 +177,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
-			if err != nil {
-				t.Fatalf("Failed to init pipeline: %v", err)
-			}
+			if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+				t.Fatalf("Failed to init pipeline: %v", initResult)
+			}
- controller.Pipelines[tt.pipeline.Name] = pipeline
+ controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
req := ctrl.Request{
@@ -405,11 +405,11 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
if tt.pipelineConfig != nil {
controller.PipelineConfigs[tt.pipelineConfig.Name] = *tt.pipelineConfig
- pipeline, err := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
- if err != nil {
- t.Fatalf("Failed to init pipeline: %v", err)
+ initResult := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
+ if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ t.Fatalf("Failed to init pipeline: %v", initResult)
}
- controller.Pipelines[tt.pipelineConfig.Name] = pipeline
+ controller.Pipelines[tt.pipelineConfig.Name] = initResult.Pipeline
}
err := controller.ProcessNewDecisionFromAPI(context.Background(), tt.decision)
@@ -470,16 +470,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
tests := []struct {
- name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
- expectError bool
+ name string
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
- name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "supported netapp step",
@@ -491,7 +493,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
@@ -500,13 +503,14 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "unsupported-plugin",
},
},
- expectError: true,
+ expectNonCriticalError: false,
+ expectCriticalError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -518,14 +522,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectError && err == nil {
+ if tt.expectCriticalError && initResult.CriticalErr == nil {
t.Error("Expected error but got none")
}
- if !tt.expectError && err != nil {
- t.Errorf("Expected no error but got: %v", err)
+ if !tt.expectCriticalError && initResult.CriticalErr != nil {
+ t.Errorf("Expected no error but got: %v", initResult.CriticalErr)
+ }
+
+ if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ t.Error("Expected non-critical error but got none")
}
- if !tt.expectError && pipeline == nil {
- t.Error("Expected pipeline but got nil")
+ if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
}
})
}
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go
index dfbd7a249..fcdcbbd5c 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller.go
@@ -151,9 +151,9 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[api.ExternalSchedulerRequest], error) {
+) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
- return lib.NewFilterWeigherPipeline(
+ return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
supportedFilters, p.Spec.Filters,
supportedWeighers, p.Spec.Weighers,
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 69ead2c8c..ee1c456b4 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -207,16 +207,16 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
Spec: tt.pipeline.Spec,
})
- if err != nil {
+ if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
-				t.Fatalf("Failed to init pipeline: %v", err)
+				t.Fatalf("Failed to init pipeline: %v", initResult)
}
- controller.Pipelines[tt.pipeline.Name] = pipeline
+ controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
req := ctrl.Request{
@@ -263,16 +263,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
tests := []struct {
- name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
- expectError bool
+ name string
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
- name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "supported step",
@@ -281,7 +283,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "filter_status_conditions",
},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
@@ -290,7 +293,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "unsupported-plugin",
},
},
- expectError: true,
+ expectNonCriticalError: false,
+ expectCriticalError: true,
},
{
name: "step with scoping options",
@@ -302,7 +306,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "step with invalid scoping options",
@@ -314,13 +319,14 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
},
},
- expectError: true,
+ expectNonCriticalError: false,
+ expectCriticalError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -330,14 +336,17 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectError && err == nil {
- t.Error("Expected error but got none")
+ if tt.expectCriticalError && initResult.CriticalErr == nil {
+ t.Error("Expected critical error but got none")
}
- if !tt.expectError && err != nil {
- t.Errorf("Expected no error but got: %v", err)
+ if !tt.expectCriticalError && initResult.CriticalErr != nil {
+ t.Errorf("Expected no critical error but got: %v", initResult.CriticalErr)
+ }
+ if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ t.Error("Expected non-critical error but got none")
}
- if !tt.expectError && pipeline == nil {
- t.Error("Expected pipeline but got nil")
+ if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
}
})
}
@@ -672,16 +681,16 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
// Setup runtime pipeline if needed
if tt.pipeline != nil {
- pipeline, err := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(context.Background(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: tt.pipeline.Name,
},
Spec: tt.pipeline.Spec,
})
- if err != nil {
- t.Fatalf("Failed to init pipeline: %v", err)
+ if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ t.Fatalf("Failed to init pipeline: %v", initResult)
}
- controller.Pipelines[tt.pipeline.Name] = pipeline
+ controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
// Call the method under test
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index fbb8f84d0..7f59415d6 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -197,9 +197,9 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) (lib.Pipeline[pods.PodPipelineRequest], error) {
+) lib.PipelineInitResult[lib.Pipeline[pods.PodPipelineRequest]] {
- return lib.NewFilterWeigherPipeline(
+ return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
supportedFilters, p.Spec.Filters,
supportedWeighers, p.Spec.Weighers,
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 4e6a6f249..cdc11b86b 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -185,16 +185,18 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
tests := []struct {
- name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
- expectError bool
+ name string
+ filters []v1alpha1.StepSpec
+ weighers []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
- name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ filters: []v1alpha1.StepSpec{},
+ weighers: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "noop step",
@@ -203,7 +205,8 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "noop",
},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
@@ -212,13 +215,14 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
Name: "unsupported",
},
},
- expectError: true,
+ expectNonCriticalError: false,
+ expectCriticalError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pipeline, err := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
+ initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
},
@@ -228,18 +232,20 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectError && err == nil {
- t.Error("expected error but got none")
- return
+ if tt.expectCriticalError && initResult.CriticalErr == nil {
+ t.Error("expected critical error but got none")
}
- if !tt.expectError && err != nil {
- t.Errorf("expected no error, got: %v", err)
- return
+ if !tt.expectCriticalError && initResult.CriticalErr != nil {
+ t.Errorf("expected no critical error, got: %v", initResult.CriticalErr)
+ }
+
+ if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ t.Error("expected non-critical error but got none")
}
- if !tt.expectError && pipeline == nil {
- t.Error("expected pipeline to be non-nil")
+ if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
+ t.Errorf("expected no non-critical error, got: %v", initResult.NonCriticalErr)
}
})
}
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index d1c3445cf..a0a0226d2 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -35,7 +35,7 @@ func (p *Pipeline) Init(
ctx context.Context,
confedSteps []v1alpha1.StepSpec,
supportedSteps map[string]Step,
-) error {
+) (nonCriticalErr, criticalErr error) {
p.order = []string{}
// Load all steps from the configuration.
@@ -43,17 +43,19 @@ func (p *Pipeline) Init(
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
- return errors.New("descheduler: unsupported step: " + stepConf.Name)
+ nonCriticalErr = errors.New("descheduler: unsupported step name: " + stepConf.Name)
+ continue // Descheduler steps are optional.
}
step = monitorStep(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
- return err
+ nonCriticalErr = errors.New("descheduler: failed to initialize step " + stepConf.Name + ": " + err.Error())
+ continue // Descheduler steps are optional.
}
p.steps[stepConf.Name] = step
p.order = append(p.order, stepConf.Name)
slog.Info("descheduler: added step", "name", stepConf.Name)
}
- return nil
+ return nonCriticalErr, nil // At the moment, there are no critical errors.
}
// Execute the descheduler steps in parallel and collect the decisions made by
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index 0cae5eaff..1e10a4ad5 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -46,14 +46,22 @@ func (c *DeschedulingsPipelineController) PipelineType() v1alpha1.PipelineType {
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DeschedulingsPipelineController) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (*Pipeline, error) {
+func (c *DeschedulingsPipelineController) InitPipeline(
+ ctx context.Context,
+ p v1alpha1.Pipeline,
+) lib.PipelineInitResult[*Pipeline] {
+
pipeline := &Pipeline{
Client: c.Client,
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- err := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
- return pipeline, err
+ nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
+ return lib.PipelineInitResult[*Pipeline]{
+ Pipeline: pipeline,
+ NonCriticalErr: nonCriticalErr,
+ CriticalErr: criticalErr,
+ }
}
func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx context.Context) {
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 57b8231a5..3c24ce25a 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -39,10 +39,10 @@ func (m *mockControllerStep) Init(ctx context.Context, client client.Client, ste
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
- name string
- steps []v1alpha1.StepSpec
- expectError bool
- expectedError string
+ name string
+ steps []v1alpha1.StepSpec
+ expectNonCriticalError bool
+ expectCriticalError bool
}{
{
name: "successful pipeline initialization",
@@ -51,7 +51,8 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
Name: "mock-step",
},
},
- expectError: false,
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
{
name: "unsupported step",
@@ -60,13 +61,14 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
Name: "unsupported",
},
},
- expectError: true,
- expectedError: "descheduler: unsupported step: unsupported",
+ expectNonCriticalError: true,
+ expectCriticalError: false,
},
{
- name: "empty steps",
- steps: []v1alpha1.StepSpec{},
- expectError: false,
+ name: "empty steps",
+ steps: []v1alpha1.StepSpec{},
+ expectNonCriticalError: false,
+ expectCriticalError: false,
},
}
@@ -81,23 +83,28 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
CycleDetector: controller.CycleDetector,
Monitor: controller.Monitor,
}
- err := pipeline.Init(t.Context(), tt.steps, map[string]Step{
+ nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]Step{
"mock-step": &mockControllerStep{},
})
- if tt.expectError {
- if err == nil {
- t.Error("expected error but got none")
+ if tt.expectCriticalError {
+ if criticalErr == nil {
+ t.Errorf("expected critical error, got none")
}
- if tt.expectedError != "" && err.Error() != tt.expectedError {
- t.Errorf("expected error %q, got %q", tt.expectedError, err.Error())
+ } else {
+ if criticalErr != nil {
+ t.Errorf("unexpected critical error: %v", criticalErr)
}
- return
}
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- return
+ if tt.expectNonCriticalError {
+ if nonCriticalErr == nil {
+ t.Errorf("expected non-critical error, got none")
+ }
+ } else {
+ if nonCriticalErr != nil {
+ t.Errorf("unexpected non-critical error: %v", nonCriticalErr)
+ }
}
if pipeline.CycleDetector != controller.CycleDetector {
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index 4ed4f327e..5a0c9a027 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -40,11 +40,11 @@ func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step
func TestPipeline_Init(t *testing.T) {
tests := []struct {
- name string
- supportedSteps map[string]Step
- confedSteps []v1alpha1.StepSpec
- expectedSteps int
- expectedError bool
+ name string
+ supportedSteps map[string]Step
+ confedSteps []v1alpha1.StepSpec
+ expectedNonCriticalError bool
+ expectedCriticalError bool
}{
{
name: "successful initialization with single step",
@@ -54,7 +54,8 @@ func TestPipeline_Init(t *testing.T) {
confedSteps: []v1alpha1.StepSpec{{
Name: "test-step",
}},
- expectedSteps: 1,
+ expectedNonCriticalError: false,
+ expectedCriticalError: false,
},
{
name: "initialization with unsupported step",
@@ -64,7 +65,8 @@ func TestPipeline_Init(t *testing.T) {
confedSteps: []v1alpha1.StepSpec{{
Name: "unsupported-step",
}},
- expectedError: true,
+ expectedNonCriticalError: true,
+ expectedCriticalError: false,
},
{
name: "initialization with step init error",
@@ -74,7 +76,8 @@ func TestPipeline_Init(t *testing.T) {
confedSteps: []v1alpha1.StepSpec{{
Name: "failing-step",
}},
- expectedError: true,
+ expectedNonCriticalError: true,
+ expectedCriticalError: false,
},
{
name: "initialization with multiple steps",
@@ -90,7 +93,8 @@ func TestPipeline_Init(t *testing.T) {
Name: "step2",
},
},
- expectedSteps: 2,
+ expectedNonCriticalError: false,
+ expectedCriticalError: false,
},
}
@@ -98,21 +102,22 @@ func TestPipeline_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
pipeline := &Pipeline{}
- err := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
- if tt.expectedError {
- if err == nil {
- t.Fatalf("expected error during initialization, got none")
+ nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
+ if tt.expectedCriticalError {
+ if criticalErr == nil {
+ t.Fatalf("expected critical error during initialization, got none")
}
return
}
- if err != nil {
- t.Fatalf("Failed to initialize pipeline: %v", err)
+ if criticalErr != nil {
+ t.Fatalf("Failed to initialize pipeline: %v", criticalErr)
}
- if len(pipeline.steps) != tt.expectedSteps {
- t.Errorf("expected %d steps, got %d", tt.expectedSteps, len(pipeline.steps))
+ if nonCriticalErr != nil && !tt.expectedNonCriticalError {
+ t.Errorf("unexpected non-critical error during initialization: %v", nonCriticalErr)
+ } else if nonCriticalErr == nil && tt.expectedNonCriticalError {
+ t.Errorf("expected non-critical error during initialization, got none")
}
-
// Verify that successfully initialized steps are actually initialized
for _, step := range pipeline.steps {
if stepMonitor, ok := step.(StepMonitor); ok {
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 68f4401dd..0233349f5 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -47,7 +47,7 @@ type StepWrapper[RequestType PipelineRequest] func(
) (Step[RequestType], error)
// Create a new pipeline with filters and weighers contained in the configuration.
-func NewFilterWeigherPipeline[RequestType PipelineRequest](
+func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
@@ -56,14 +56,16 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
supportedWeighers map[string]func() Step[RequestType],
confedWeighers []v1alpha1.StepSpec,
monitor PipelineMonitor,
-) (Pipeline[RequestType], error) {
+) PipelineInitResult[Pipeline[RequestType]] {
pipelineMonitor := monitor.SubPipeline(name)
// Ensure there are no overlaps between filter and weigher names.
for filterName := range supportedFilters {
if _, ok := supportedWeighers[filterName]; ok {
- return nil, errors.New("step name overlap between filters and weighers: " + filterName)
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("step name overlap between filters and weighers: " + filterName),
+ }
}
}
@@ -75,12 +77,16 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "filters", maps.Keys(supportedFilters))
makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return nil, errors.New("unsupported filter name: " + filterConfig.Name)
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("unsupported filter name: " + filterConfig.Name),
+ }
}
filter := makeFilter()
filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
if err := filter.Init(ctx, client, filterConfig); err != nil {
- return nil, errors.New("failed to initialize filter: " + err.Error())
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
+ }
}
filtersByName[filterConfig.Name] = filter
filtersOrder = append(filtersOrder, filterConfig.Name)
@@ -90,32 +96,38 @@ func NewFilterWeigherPipeline[RequestType PipelineRequest](
// Load all weighers from the configuration.
weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
weighersOrder := []string{}
+ var nonCriticalErr error
for _, weigherConfig := range confedWeighers {
slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
makeWeigher, ok := supportedWeighers[weigherConfig.Name]
if !ok {
- return nil, errors.New("unsupported weigher name: " + weigherConfig.Name)
+ nonCriticalErr = errors.New("unsupported weigher name: " + weigherConfig.Name)
+ continue // Weighers are optional.
}
weigher := makeWeigher()
// Validate that the weigher doesn't unexpectedly filter out hosts.
weigher = validateWeigher(weigher)
weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
if err := weigher.Init(ctx, client, weigherConfig); err != nil {
- return nil, errors.New("failed to initialize pipeline step: " + err.Error())
+ nonCriticalErr = errors.New("failed to initialize weigher: " + err.Error())
+ continue // Weighers are optional.
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
}
- return &pipeline[RequestType]{
- filtersOrder: filtersOrder,
- filters: filtersByName,
- weighersOrder: weighersOrder,
- weighers: weighersByName,
- monitor: pipelineMonitor,
- }, nil
+ return PipelineInitResult[Pipeline[RequestType]]{
+ NonCriticalErr: nonCriticalErr,
+ Pipeline: &pipeline[RequestType]{
+ filtersOrder: filtersOrder,
+ filters: filtersByName,
+ weighersOrder: weighersOrder,
+ weighers: weighersByName,
+ monitor: pipelineMonitor,
+ },
+ }
}
// Execute filters and collect their activations by step name.
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 50e3af497..85c6099fe 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -18,6 +18,22 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
+// Result returned by the InitPipeline interface method.
+type PipelineInitResult[PipelineType any] struct {
+ // The pipeline, if successfully created.
+ Pipeline PipelineType
+
+ // A critical error that prevented the pipeline from being initialized.
+ // If a critical error occurs, the pipeline should not be used.
+ CriticalErr error
+
+ // A non-critical error that occurred during initialization.
+ // If a non-critical error occurs, the pipeline may still be used.
+ // However, the error should be reported in the pipeline status
+ // so we can debug potential issues.
+ NonCriticalErr error
+}
+
// The base pipeline controller will delegate some methods to the parent
// controller struct. The parent controller only needs to conform to this
// interface and set the delegate field accordingly.
@@ -27,7 +43,8 @@ type PipelineInitializer[PipelineType any] interface {
// This method is delegated to the parent controller, when a pipeline needs
// to be newly initialized or re-initialized to update it in the pipeline
// map.
- InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (PipelineType, error)
+ InitPipeline(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[PipelineType]
+
// Get the accepted pipeline type for this controller.
//
// This is used to filter pipelines when listing existing pipelines on
@@ -89,36 +106,22 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
log := ctrl.LoggerFrom(ctx)
old := obj.DeepCopy()
- // Check if all steps are ready. If not, check if the step is mandatory.
- obj.Status.TotalSteps = len(obj.Spec.Filters) + len(obj.Spec.Weighers) + len(obj.Spec.Detectors)
- obj.Status.ReadySteps = 0
- for range obj.Spec.Filters { // Could use len() directly but want to keep the pattern.
- // If needed, check if this filter needs any dependencies. For now,
- // as filters do not depend on knowledges, we skip this.
- obj.Status.ReadySteps++
- }
- for _, detector := range obj.Spec.Detectors {
- if err := c.checkAllKnowledgesReady(ctx, detector.Knowledges); err == nil {
- obj.Status.ReadySteps++
- }
- }
- for _, weigher := range obj.Spec.Weighers {
- if err := c.checkAllKnowledgesReady(ctx, weigher.Knowledges); err == nil {
- obj.Status.ReadySteps++
- }
- }
- obj.Status.StepsReadyFrac = fmt.Sprintf("%d/%d", obj.Status.ReadySteps, obj.Status.TotalSteps)
+ initResult := c.Initializer.InitPipeline(ctx, *obj)
- var err error
- c.Pipelines[obj.Name], err = c.Initializer.InitPipeline(ctx, *obj)
- c.PipelineConfigs[obj.Name] = *obj
- if err != nil {
- log.Error(err, "failed to create pipeline", "pipelineName", obj.Name)
+ // If there was a critical error, the pipeline cannot be used.
+ if initResult.CriticalErr != nil {
+ log.Error(initResult.CriticalErr, "failed to create pipeline", "pipelineName", obj.Name)
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
Type: v1alpha1.PipelineConditionReady,
Status: metav1.ConditionFalse,
Reason: "PipelineInitFailed",
- Message: err.Error(),
+ Message: initResult.CriticalErr.Error(),
+ })
+ meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
+ Type: v1alpha1.PipelineConditionAllStepsReady,
+ Status: metav1.ConditionFalse,
+ Reason: "PipelineInitFailed",
+ Message: initResult.CriticalErr.Error(),
})
patch := client.MergeFrom(old)
if err := c.Status().Patch(ctx, obj, patch); err != nil {
@@ -128,6 +131,21 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
delete(c.PipelineConfigs, obj.Name)
return
}
+
+ // If there was a non-critical error, continue running the pipeline but
+ // report the error in the pipeline status.
+ if initResult.NonCriticalErr != nil {
+ log.Error(initResult.NonCriticalErr, "non-critical error during pipeline initialization", "pipelineName", obj.Name)
+ meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
+ Type: v1alpha1.PipelineConditionAllStepsReady,
+ Status: metav1.ConditionFalse,
+ Reason: "SomeStepsNotReady",
+ Message: initResult.NonCriticalErr.Error(),
+ })
+ }
+
+ c.Pipelines[obj.Name] = initResult.Pipeline
+ c.PipelineConfigs[obj.Name] = *obj
log.Info("pipeline created and ready", "pipelineName", obj.Name)
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
Type: v1alpha1.PipelineConditionReady,
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index efb2a1ba0..bc0819cf1 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -26,14 +26,17 @@ type mockPipeline struct {
// Mock PipelineInitializer for testing
type mockPipelineInitializer struct {
pipelineType v1alpha1.PipelineType
- initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error)
+ initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[mockPipeline]
}
-func (m *mockPipelineInitializer) InitPipeline(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
+func (m *mockPipelineInitializer) InitPipeline(
+ ctx context.Context, p v1alpha1.Pipeline,
+) PipelineInitResult[mockPipeline] {
+
if m.initPipelineFunc != nil {
return m.initPipelineFunc(ctx, p)
}
- return mockPipeline{name: p.Name}, nil
+ return PipelineInitResult[mockPipeline]{Pipeline: mockPipeline{name: p.Name}}
}
func (m *mockPipelineInitializer) PipelineType() v1alpha1.PipelineType {
@@ -311,8 +314,8 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
}
if tt.initPipelineError {
- initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) (mockPipeline, error) {
- return mockPipeline{}, context.Canceled
+ initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[mockPipeline] {
+ return PipelineInitResult[mockPipeline]{CriticalErr: context.Canceled}
}
}
From 8e8bd2a97df70cc6c3c58d48349dfcc5aa7c92fa Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Fri, 23 Jan 2026 12:10:41 +0100
Subject: [PATCH 13/41] Moving first knowledge checking logic to step (wip)
---
api/v1alpha1/pipeline_types.go | 7 -
api/v1alpha1/zz_generated.deepcopy.go | 5 -
config/crd/bases/cortex.cloud_pipelines.yaml | 150 -------------
config/crd/cortex.cloud_pipelines.yaml | 150 -------------
.../templates/crd/cortex.cloud_pipelines.yaml | 150 -------------
.../descheduling/nova/plugins/base.go | 37 +++-
.../nova/plugins/kvm/avoid_high_steal_pct.go | 14 ++
.../scheduling/lib/pipeline_controller.go | 74 +------
.../lib/pipeline_controller_test.go | 202 +-----------------
9 files changed, 62 insertions(+), 727 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index da5ccc4c4..06d4c08a4 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -4,7 +4,6 @@
package v1alpha1
import (
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -22,12 +21,6 @@ type StepSpec struct {
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
-
- // If required, steps can specify knowledges on which they depend.
- // Changes to the knowledges' readiness will trigger re-evaluation of
- // pipelines containing this step.
- // +kubebuilder:validation:Optional
- Knowledges []corev1.ObjectReference `json:"knowledges,omitempty"`
}
type PipelineType string
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index c120577d1..ae02b8da2 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1101,11 +1101,6 @@ func (in *StepResult) DeepCopy() *StepResult {
func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
in.Opts.DeepCopyInto(&out.Opts)
- if in.Knowledges != nil {
- in, out := &in.Knowledges, &out.Knowledges
- *out = make([]v1.ObjectReference, len(*in))
- copy(*out, *in)
- }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 18f8d4b97..feedd4478 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -79,56 +79,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -157,56 +107,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -252,56 +152,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 18f8d4b97..feedd4478 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -79,56 +79,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -157,56 +107,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -252,56 +152,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index e59f3bff0..00826a8d8 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -85,56 +85,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -163,56 +113,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -258,56 +158,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- knowledges:
- description: |-
- If required, steps can specify knowledges on which they depend.
- Changes to the knowledges' readiness will trigger re-evaluation of
- pipelines containing this step.
- items:
- description: ObjectReference contains enough information to
- let you inspect or modify the referred object.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: |-
- If referring to a piece of an object instead of an entire object, this string
- should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
- For example, if the object reference is to a container within a pod, this would take on a value like:
- "spec.containers{name}" (where "name" refers to the name of the container that triggered
- the event) or if no container name is specified "spec.containers[2]" (container with
- index 2 in this pod). This syntax is chosen only to have some well-defined way of
- referencing a part of an object.
- type: string
- kind:
- description: |-
- Kind of the referent.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- type: string
- name:
- description: |-
- Name of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- namespace:
- description: |-
- Namespace of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- type: string
- resourceVersion:
- description: |-
- Specific resourceVersion to which this reference is made, if any.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- type: string
- uid:
- description: |-
- UID of the referent.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index 7c024b71f..c7bdd7ae3 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -5,9 +5,12 @@ package plugins
import (
"context"
+ "fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/pkg/conf"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -21,13 +24,41 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (s *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ d.Client = client
+
opts := conf.NewRawOptsBytes(step.Opts.Raw)
- if err := s.Load(opts); err != nil {
+ if err := d.Load(opts); err != nil {
return err
}
+ return nil
+}
- s.Client = client
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (d *Detector[PipelineType]) CheckAllKnowledgesReady(
+ ctx context.Context,
+ knowledges ...corev1.ObjectReference,
+) error {
+
+ for _, objRef := range knowledges {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := d.Client.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return fmt.Errorf("knowledge %s not ready: %s",
+ objRef.Name,
+ meta.FindStatusCondition(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady).Message,
+ )
+ }
+ if knowledge.Status.RawLength == 0 {
+ return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
+ }
+ }
return nil
}
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
index 348ee3249..d37035bc7 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
@@ -11,6 +11,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -24,6 +25,19 @@ type AvoidHighStealPctStep struct {
plugins.Detector[AvoidHighStealPctStepOpts]
}
+func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.Detector.Init(ctx, client, step); err != nil {
+ return err
+ }
+ // Check that all knowledges are ready.
+ if err := s.CheckAllKnowledgesReady(ctx,
+ corev1.ObjectReference{Name: "kvm-libvirt-domain-cpu-steal-pct"},
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
func (s *AvoidHighStealPctStep) Run() ([]plugins.Decision, error) {
if s.Options.MaxStealPctOverObservedTimeSpan <= 0 {
slog.Info("skipping step because maxStealPctOverObservedTimeSpan is not set or <= 0")
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 85c6099fe..cb66a52bb 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -8,7 +8,6 @@ import (
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
@@ -202,46 +201,7 @@ func (c *BasePipelineController[PipelineType]) HandlePipelineDeleted(
delete(c.PipelineConfigs, pipelineConf.Name)
}
-// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (c *BasePipelineController[PipelineType]) checkAllKnowledgesReady(
- ctx context.Context,
- objects []corev1.ObjectReference,
-) error {
-
- log := ctrl.LoggerFrom(ctx)
- // Check the status of all knowledges depending on this step.
- readyKnowledges := 0
- totalKnowledges := len(objects)
- for _, objRef := range objects {
- knowledge := &v1alpha1.Knowledge{}
- if err := c.Get(ctx, client.ObjectKey{
- Name: objRef.Name,
- Namespace: objRef.Namespace,
- }, knowledge); err != nil {
- log.Error(err, "failed to get knowledge depending on step", "knowledgeName", objRef.Name)
- continue
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- log.Info("knowledge not ready due to error condition", "knowledgeName", objRef.Name)
- continue
- }
- if knowledge.Status.RawLength == 0 {
- log.Info("knowledge not ready, no data available", "knowledgeName", objRef.Name)
- continue
- }
- readyKnowledges++
- }
- if readyKnowledges != totalKnowledges {
- return fmt.Errorf(
- "%d/%d knowledges ready",
- readyKnowledges, totalKnowledges,
- )
- }
- return nil
-}
-
-// Handle a knowledge creation, update, or delete event from watching knowledge resources.
+// Handle a knowledge creation, readiness update, or delete event from watching knowledge resources.
func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
ctx context.Context,
obj *v1alpha1.Knowledge,
@@ -252,37 +212,23 @@ func (c *BasePipelineController[PipelineType]) handleKnowledgeChange(
return
}
log := ctrl.LoggerFrom(ctx)
- log.Info("knowledge changed, re-evaluating dependent pipelines", "knowledgeName", obj.Name)
+ log.Info("knowledge changed, re-evaluating all pipelines", "knowledgeName", obj.Name)
// Find all pipelines depending on this knowledge and re-evaluate them.
var pipelines v1alpha1.PipelineList
if err := c.List(ctx, &pipelines); err != nil {
- log.Error(err, "failed to list pipelines for knowledge", "knowledgeName", obj.Name)
+ log.Error(err, "failed to list pipelines for knowledge change", "knowledgeName", obj.Name)
return
}
for _, pipeline := range pipelines.Items {
- needsUpdate := false
- // For filter-weigher pipelines, only weighers may depend on knowledges.
- for _, step := range pipeline.Spec.Weighers {
- for _, knowledgeRef := range step.Knowledges {
- if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
- needsUpdate = true
- break
- }
- }
- }
- // Check descheduler pipelines where detectors may depend on knowledges.
- for _, step := range pipeline.Spec.Detectors {
- for _, knowledgeRef := range step.Knowledges {
- if knowledgeRef.Name == obj.Name && knowledgeRef.Namespace == obj.Namespace {
- needsUpdate = true
- break
- }
- }
+ // TODO: Not all pipelines may depend on this knowledge. At the moment
+ // we re-evaluate all pipelines matching this controller.
+ if pipeline.Spec.SchedulingDomain != c.SchedulingDomain {
+ continue
}
- if needsUpdate {
- log.Info("re-evaluating pipeline due to knowledge change", "pipelineName", pipeline.Name)
- c.handlePipelineChange(ctx, &pipeline, queue)
+ if pipeline.Spec.Type != c.Initializer.PipelineType() {
+ continue
}
+ c.handlePipelineChange(ctx, &pipeline, queue)
}
}
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index bc0819cf1..e2fffe563 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -7,7 +7,6 @@ import (
"context"
"testing"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -235,30 +234,6 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
expectReady: true,
expectInMap: true,
},
- {
- name: "pipeline with optional step not ready",
- pipeline: &v1alpha1.Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-pipeline-optional",
- },
- Spec: v1alpha1.PipelineSpec{
- SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
- {
- Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "missing-knowledge", Namespace: "default"},
- },
- },
- },
- },
- },
- knowledges: []v1alpha1.Knowledge{},
- schedulingDomain: v1alpha1.SchedulingDomainNova,
- expectReady: true,
- expectInMap: true,
- },
{
name: "pipeline init fails",
pipeline: &v1alpha1.Pipeline{
@@ -484,157 +459,6 @@ func TestBasePipelineController_HandlePipelineDeleted(t *testing.T) {
}
}
-func TestBasePipelineController_checkAllKnowledgesReady(t *testing.T) {
- scheme := runtime.NewScheme()
- if err := v1alpha1.AddToScheme(scheme); err != nil {
- t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
- }
-
- tests := []struct {
- name string
- knowledges []v1alpha1.Knowledge
- expectError bool
- }{
- {
- name: "no knowledges",
- knowledges: []v1alpha1.Knowledge{},
- expectError: false,
- },
- {
- name: "ready knowledge",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- },
- expectError: false,
- },
- {
- name: "knowledge in error state",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "error-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- Conditions: []metav1.Condition{
- {
- Type: v1alpha1.KnowledgeConditionReady,
- Status: metav1.ConditionFalse,
- },
- },
- },
- },
- },
- expectError: true,
- },
- {
- name: "knowledge with no data",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "no-data-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 0,
- },
- },
- },
- expectError: true,
- },
- {
- name: "multiple knowledges, all ready",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "knowledge-1",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "knowledge-2",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 5,
- },
- },
- },
- expectError: false,
- },
- {
- name: "multiple knowledges, some not ready",
- knowledges: []v1alpha1.Knowledge{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 10,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "not-ready-knowledge",
- Namespace: "default",
- },
- Status: v1alpha1.KnowledgeStatus{
- RawLength: 0,
- },
- },
- },
- expectError: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- objects := make([]client.Object, len(tt.knowledges))
- for i := range tt.knowledges {
- objects[i] = &tt.knowledges[i]
- }
-
- fakeClient := fake.NewClientBuilder().
- WithScheme(scheme).
- WithObjects(objects...).
- Build()
-
- controller := &BasePipelineController[mockPipeline]{
- Client: fakeClient,
- }
-
- objectReferences := make([]corev1.ObjectReference, len(tt.knowledges))
- for i, k := range tt.knowledges {
- objectReferences[i] = corev1.ObjectReference{
- Name: k.Name,
- Namespace: k.Namespace,
- }
- }
- err := controller.checkAllKnowledgesReady(context.Background(), objectReferences)
-
- if tt.expectError && err == nil {
- t.Error("Expected error but got none")
- }
- if !tt.expectError && err != nil {
- t.Errorf("Expected no error but got: %v", err)
- }
- })
- }
-}
-
func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
@@ -649,7 +473,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
expectReEvaluated []string
}{
{
- name: "knowledge change triggers dependent pipeline re-evaluation",
+ name: "knowledge change triggers pipeline re-evaluation",
knowledge: &v1alpha1.Knowledge{
ObjectMeta: metav1.ObjectMeta{
Name: "test-knowledge",
@@ -665,7 +489,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
pipelines: []v1alpha1.Pipeline{
{
ObjectMeta: metav1.ObjectMeta{
- Name: "dependent-pipeline",
+ Name: "pipeline-1",
},
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
@@ -673,16 +497,13 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
- Name: "independent-pipeline",
+ Name: "pipeline-2",
},
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
@@ -690,16 +511,13 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "other-knowledge", Namespace: "default"},
- },
},
},
},
},
},
schedulingDomain: v1alpha1.SchedulingDomainNova,
- expectReEvaluated: []string{"dependent-pipeline"},
+ expectReEvaluated: []string{"pipeline-1", "pipeline-2"},
},
{
name: "knowledge change in different scheduling domain",
@@ -723,9 +541,6 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
@@ -800,9 +615,6 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
@@ -951,9 +763,6 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
@@ -1019,9 +828,6 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Weighers: []v1alpha1.StepSpec{
{
Name: "test-weigher",
- Knowledges: []corev1.ObjectReference{
- {Name: "test-knowledge", Namespace: "default"},
- },
},
},
},
From 29cd1277bb340ae399943eb8634c432a587ca806 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 10:56:03 +0100
Subject: [PATCH 14/41] Apply pattern to weighers
---
.../manila/pipeline_controller_test.go | 47 ++++++++++++++++++-
.../weighers/netapp_cpu_usage_balancing.go | 14 +++++-
.../vmware_anti_affinity_noisy_projects.go | 14 +++++-
.../vmware_avoid_long_term_contended_hosts.go | 14 +++++-
...vmware_avoid_short_term_contended_hosts.go | 14 +++++-
.../vmware_general_purpose_balancing.go | 17 ++++++-
.../weighers/vmware_hana_binpacking.go | 17 ++++++-
.../descheduling/nova/plugins/base.go | 16 +++----
.../nova/plugins/kvm/avoid_high_steal_pct.go | 6 +--
internal/scheduling/lib/step.go | 27 +++++++++++
10 files changed, 164 insertions(+), 22 deletions(-)
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index eba1e2dd0..f9e3bffdb 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -17,7 +17,9 @@ import (
api "github.com/cobaltcore-dev/cortex/api/delegation/manila"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "github.com/sapcc/go-bits/must"
+ "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/storage"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/pkg/conf"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -465,14 +467,16 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
}
func TestDecisionPipelineController_InitPipeline(t *testing.T) {
- controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ scheme := runtime.NewScheme()
+ if err := v1alpha1.AddToScheme(scheme); err != nil {
+ t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
}
tests := []struct {
name string
filters []v1alpha1.StepSpec
weighers []v1alpha1.StepSpec
+ knowledges []client.Object
expectNonCriticalError bool
expectCriticalError bool
}{
@@ -480,6 +484,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
name: "empty steps",
filters: []v1alpha1.StepSpec{},
weighers: []v1alpha1.StepSpec{},
+ knowledges: []client.Object{},
expectNonCriticalError: false,
expectCriticalError: false,
},
@@ -493,6 +498,34 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
},
},
+ knowledges: []client.Object{
+ &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "netapp-storage-pool-cpu-usage-manila",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ Raw: must.Return(v1alpha1.BoxFeatureList([]storage.StoragePoolCPUUsage{
+ {
+ StoragePoolName: "manila-share-1@backend1",
+ AvgCPUUsagePct: 50,
+ MaxCPUUsagePct: 80,
+ },
+ {
+ StoragePoolName: "manila-share-2@backend2",
+ AvgCPUUsagePct: 20,
+ MaxCPUUsagePct: 40,
+ },
+ })),
+ RawLength: 2,
+ },
+ },
+ },
expectNonCriticalError: false,
expectCriticalError: false,
},
@@ -510,6 +543,16 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ client := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(tt.knowledges...).
+ WithStatusSubresource(&v1alpha1.Decision{}).
+ Build()
+ controller := &DecisionPipelineController{
+ Monitor: lib.PipelineMonitor{},
+ }
+ controller.Client = client // Through basepipelinecontroller
+
initResult := controller.InitPipeline(t.Context(), v1alpha1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pipeline",
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index 944e38d5a..d3a9b0bbb 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -12,6 +12,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/storage"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -44,10 +45,21 @@ func (o NetappCPUUsageBalancingStepOpts) Validate() error {
// Step to balance CPU usage by avoiding highly used storage pools.
type NetappCPUUsageBalancingStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *NetappCPUUsageBalancingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "netapp-storage-pool-cpu-usage-manila"}); err != nil {
+ return err
+ }
+ return nil
+}
+
// Downvote hosts that are highly contended.
func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 5dcb35c12..0b1622376 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -12,6 +12,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -35,10 +36,21 @@ func (o VMwareAntiAffinityNoisyProjectsStepOpts) Validate() error {
// Step to avoid noisy projects by downvoting the hosts they are running on.
type VMwareAntiAffinityNoisyProjectsStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *VMwareAntiAffinityNoisyProjectsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-project-noisiness"}); err != nil {
+ return err
+ }
+ return nil
+}
+
// Downvote the hosts a project is currently running on if it's noisy.
func (s *VMwareAntiAffinityNoisyProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index 9908c4ca3..2d7359928 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -12,6 +12,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -44,10 +45,21 @@ func (o VMwareAvoidLongTermContendedHostsStepOpts) Validate() error {
// Step to avoid long term contended hosts by downvoting them.
type VMwareAvoidLongTermContendedHostsStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *VMwareAvoidLongTermContendedHostsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-long-term-contended-hosts"}); err != nil {
+ return err
+ }
+ return nil
+}
+
// Downvote hosts that are highly contended.
func (s *VMwareAvoidLongTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index 7b6cba041..3d841d25a 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -12,6 +12,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -44,10 +45,21 @@ func (o VMwareAvoidShortTermContendedHostsStepOpts) Validate() error {
// Step to avoid recently contended hosts by downvoting them.
type VMwareAvoidShortTermContendedHostsStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *VMwareAvoidShortTermContendedHostsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-short-term-contended-hosts"}); err != nil {
+ return err
+ }
+ return nil
+}
+
// Downvote hosts that are highly contended.
func (s *VMwareAvoidShortTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index 95a76f4ad..f4d39f1bc 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -13,6 +13,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -34,10 +35,24 @@ func (o VMwareGeneralPurposeBalancingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareGeneralPurposeBalancingStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *VMwareGeneralPurposeBalancingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx,
+ corev1.ObjectReference{Name: "host-utilization"},
+ corev1.ObjectReference{Name: "host-capabilities"},
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
// Pack VMs on hosts based on their flavor.
func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index 008c04266..c5e017e66 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -13,6 +13,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -34,10 +35,24 @@ func (o VMwareHanaBinpackingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareHanaBinpackingStep struct {
- // Weigher is a helper struct that provides common functionality for all steps.
+ // BaseStep is a helper struct that provides common functionality for all steps.
lib.BaseStep[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
+func (s *VMwareHanaBinpackingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ if err := s.BaseStep.Init(ctx, client, step); err != nil {
+ return err
+ }
+ if err := s.CheckKnowledges(ctx,
+ corev1.ObjectReference{Name: "host-utilization"},
+ corev1.ObjectReference{Name: "host-capabilities"},
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
// Pack VMs on hosts based on their flavor.
func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index c7bdd7ae3..0c62eb33b 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -35,12 +35,11 @@ func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1
}
// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (d *Detector[PipelineType]) CheckAllKnowledgesReady(
- ctx context.Context,
- knowledges ...corev1.ObjectReference,
-) error {
-
- for _, objRef := range knowledges {
+func (d *Detector[PipelineType]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+ if d.Client == nil {
+ return fmt.Errorf("kubernetes client not initialized")
+ }
+ for _, objRef := range kns {
knowledge := &v1alpha1.Knowledge{}
if err := d.Client.Get(ctx, client.ObjectKey{
Name: objRef.Name,
@@ -50,10 +49,7 @@ func (d *Detector[PipelineType]) CheckAllKnowledgesReady(
}
// Check if the knowledge status conditions indicate an error.
if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return fmt.Errorf("knowledge %s not ready: %s",
- objRef.Name,
- meta.FindStatusCondition(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady).Message,
- )
+ return fmt.Errorf("knowledge %s not ready", objRef.Name)
}
if knowledge.Status.RawLength == 0 {
return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
index d37035bc7..e5717edb1 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
@@ -25,14 +25,12 @@ type AvoidHighStealPctStep struct {
plugins.Detector[AvoidHighStealPctStepOpts]
}
+// Initialize the step and validate that all required knowledges are ready.
func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
if err := s.Detector.Init(ctx, client, step); err != nil {
return err
}
- // Check that all knowledges are ready.
- if err := s.CheckAllKnowledgesReady(ctx,
- corev1.ObjectReference{Name: "kvm-libvirt-domain-cpu-steal-pct"},
- ); err != nil {
+ if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "kvm-libvirt-domain-cpu-steal-pct"}); err != nil {
return err
}
return nil
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 59fcf2976..7fa133042 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -6,10 +6,13 @@ package lib
import (
"context"
"errors"
+ "fmt"
"log/slog"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/pkg/conf"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -77,6 +80,30 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl
return nil
}
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+ if d.Client == nil {
+ return fmt.Errorf("kubernetes client not initialized")
+ }
+ for _, objRef := range kns {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := d.Client.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return fmt.Errorf("knowledge %s not ready", objRef.Name)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
+ }
+ }
+ return nil
+}
+
// Get a default result (no action) for the input weight keys given in the request.
// Use this to initialize the result before applying filtering/weighing logic.
func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
From bf7f821d99108508871fbf0252d0ad171d3ec8a9 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 11:03:59 +0100
Subject: [PATCH 15/41] Fix linting issues from last commit
---
internal/scheduling/descheduling/nova/plugins/base.go | 3 ++-
internal/scheduling/lib/step.go | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index 0c62eb33b..14c0027d2 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -5,6 +5,7 @@ package plugins
import (
"context"
+ "errors"
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
@@ -37,7 +38,7 @@ func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1
// Check if all knowledges are ready, and if not, return an error indicating why not.
func (d *Detector[PipelineType]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
if d.Client == nil {
- return fmt.Errorf("kubernetes client not initialized")
+ return errors.New("kubernetes client not initialized")
}
for _, objRef := range kns {
knowledge := &v1alpha1.Knowledge{}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 7fa133042..99012a2f5 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -83,7 +83,7 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl
// Check if all knowledges are ready, and if not, return an error indicating why not.
func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
if d.Client == nil {
- return fmt.Errorf("kubernetes client not initialized")
+ return errors.New("kubernetes client not initialized")
}
for _, objRef := range kns {
knowledge := &v1alpha1.Knowledge{}
From 2abff2e5a39b474eae187c29d6c3787cfe6210ea Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 12:43:42 +0100
Subject: [PATCH 16/41] Add multiplier to apply to weighers
---
api/v1alpha1/pipeline_types.go | 7 +++++
config/crd/bases/cortex.cloud_pipelines.yaml | 21 ++++++++++++++
config/crd/cortex.cloud_pipelines.yaml | 21 ++++++++++++++
.../templates/crd/cortex.cloud_pipelines.yaml | 21 ++++++++++++++
internal/scheduling/lib/activation.go | 4 +--
internal/scheduling/lib/activation_test.go | 2 +-
internal/scheduling/lib/pipeline.go | 28 ++++++++++---------
7 files changed, 88 insertions(+), 16 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 06d4c08a4..9c88572ec 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -21,6 +21,13 @@ type StepSpec struct {
// and decisions made by it.
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
+
+ // Optional multiplier to apply to the step's output.
+ // This can be used to increase or decrease the weight of a step
+ // relative to other steps in the same pipeline.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default=1.0
+ Multiplier float64 `json:"multiplier,omitempty"`
}
type PipelineType string
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index feedd4478..eced3b0d3 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -79,6 +79,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -107,6 +114,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -152,6 +166,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index feedd4478..eced3b0d3 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -79,6 +79,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -107,6 +114,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -152,6 +166,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 00826a8d8..a9c2eda84 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -85,6 +85,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -113,6 +120,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -158,6 +172,13 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
+ multiplier:
+ default: 1
+ description: |-
+ Optional multiplier to apply to the step's output.
+ This can be used to increase or decrease the weight of a step
+ relative to other steps in the same pipeline.
+ type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/internal/scheduling/lib/activation.go b/internal/scheduling/lib/activation.go
index b74f965c2..78704e7e5 100644
--- a/internal/scheduling/lib/activation.go
+++ b/internal/scheduling/lib/activation.go
@@ -18,14 +18,14 @@ func (m *ActivationFunction) Norm(activation float64) float64 {
// Apply the activation function to the weights map.
// All hosts that are not in the activations map are removed.
-func (m *ActivationFunction) Apply(in, activations map[string]float64) map[string]float64 {
+func (m *ActivationFunction) Apply(in, activations map[string]float64, multiplier float64) map[string]float64 {
for host, prevWeight := range in {
// Remove hosts that are not in the weights map.
if _, ok := activations[host]; !ok {
delete(in, host)
} else {
// Apply the activation from the step.
- (in)[host] = prevWeight + math.Tanh(activations[host])
+ (in)[host] = prevWeight + multiplier*math.Tanh(activations[host])
}
}
return in
diff --git a/internal/scheduling/lib/activation_test.go b/internal/scheduling/lib/activation_test.go
index 018890aad..408f1c12f 100644
--- a/internal/scheduling/lib/activation_test.go
+++ b/internal/scheduling/lib/activation_test.go
@@ -67,7 +67,7 @@ func TestActivationFunction_Apply(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- result := af.Apply(tt.in, tt.activations)
+ result := af.Apply(tt.in, tt.activations, 1.0)
if len(result) != len(tt.expected) {
t.Fatalf("expected %d hosts, got %d", len(tt.expected), len(result))
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 0233349f5..103a1f951 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -35,17 +35,12 @@ type pipeline[RequestType PipelineRequest] struct {
weighersOrder []string
// The weighers by their name.
weighers map[string]Step[RequestType]
+ // Multipliers to apply to weigher outputs.
+ weighersMultipliers map[string]float64
// Monitor to observe the pipeline.
monitor PipelineMonitor
}
-type StepWrapper[RequestType PipelineRequest] func(
- ctx context.Context,
- client client.Client,
- step v1alpha1.StepSpec,
- impl Step[RequestType],
-) (Step[RequestType], error)
-
// Create a new pipeline with filters and weighers contained in the configuration.
func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
@@ -95,6 +90,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
// Load all weighers from the configuration.
weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
+ weighersMultipliers := make(map[string]float64, len(confedWeighers))
weighersOrder := []string{}
var nonCriticalErr error
for _, weigherConfig := range confedWeighers {
@@ -115,17 +111,19 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
+ weighersMultipliers[weigherConfig.Name] = weigherConfig.Multiplier
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
}
return PipelineInitResult[Pipeline[RequestType]]{
NonCriticalErr: nonCriticalErr,
Pipeline: &pipeline[RequestType]{
- filtersOrder: filtersOrder,
- filters: filtersByName,
- weighersOrder: weighersOrder,
- weighers: weighersByName,
- monitor: pipelineMonitor,
+ filtersOrder: filtersOrder,
+ filters: filtersByName,
+ weighersOrder: weighersOrder,
+ weighers: weighersByName,
+ weighersMultipliers: weighersMultipliers,
+ monitor: pipelineMonitor,
},
}
}
@@ -225,7 +223,11 @@ func (p *pipeline[RequestType]) applyWeights(
// This is ok, since steps can be skipped.
continue
}
- outWeights = p.Apply(outWeights, weigherActivations)
+ multiplier, ok := p.weighersMultipliers[weigherName]
+ if !ok {
+ multiplier = 1.0
+ }
+ outWeights = p.Apply(outWeights, weigherActivations, multiplier)
}
return outWeights
}
From 0878bdb763d0f04c9661dc52a5ad4f5972bd25f7 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:01:45 +0100
Subject: [PATCH 17/41] Improve code structuring in scheduling/lib
---
...troller.go => base_pipeline_controller.go} | 34 ---
...st.go => base_pipeline_controller_test.go} | 25 --
internal/scheduling/lib/base_step.go | 84 +++++
internal/scheduling/lib/base_step_test.go | 6 +
.../scheduling/lib/filter_weigher_pipeline.go | 289 ++++++++++++++++++
.../lib/filter_weigher_pipeline_test.go | 231 ++++++++++++++
internal/scheduling/lib/pipeline.go | 281 -----------------
.../scheduling/lib/pipeline_initializer.go | 44 +++
.../lib/pipeline_initializer_test.go | 30 ++
.../lib/{request.go => pipeline_request.go} | 0
...quest_test.go => pipeline_request_test.go} | 0
internal/scheduling/lib/pipeline_test.go | 226 +-------------
internal/scheduling/lib/step.go | 83 -----
internal/scheduling/lib/step_opts.go | 15 +
internal/scheduling/lib/step_opts_test.go | 13 +
internal/scheduling/lib/step_test.go | 9 -
16 files changed, 714 insertions(+), 656 deletions(-)
rename internal/scheduling/lib/{pipeline_controller.go => base_pipeline_controller.go} (86%)
rename internal/scheduling/lib/{pipeline_controller_test.go => base_pipeline_controller_test.go} (97%)
create mode 100644 internal/scheduling/lib/base_step.go
create mode 100644 internal/scheduling/lib/base_step_test.go
create mode 100644 internal/scheduling/lib/filter_weigher_pipeline.go
create mode 100644 internal/scheduling/lib/filter_weigher_pipeline_test.go
create mode 100644 internal/scheduling/lib/pipeline_initializer.go
create mode 100644 internal/scheduling/lib/pipeline_initializer_test.go
rename internal/scheduling/lib/{request.go => pipeline_request.go} (100%)
rename internal/scheduling/lib/{request_test.go => pipeline_request_test.go} (100%)
create mode 100644 internal/scheduling/lib/step_opts.go
create mode 100644 internal/scheduling/lib/step_opts_test.go
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/base_pipeline_controller.go
similarity index 86%
rename from internal/scheduling/lib/pipeline_controller.go
rename to internal/scheduling/lib/base_pipeline_controller.go
index cb66a52bb..de45fd351 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/base_pipeline_controller.go
@@ -17,40 +17,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
-// Result returned by the InitPipeline interface method.
-type PipelineInitResult[PipelineType any] struct {
- // The pipeline, if successfully created.
- Pipeline PipelineType
-
- // A critical error that prevented the pipeline from being initialized.
- // If a critical error occurs, the pipeline should not be used.
- CriticalErr error
-
- // A non-critical error that occurred during initialization.
- // If a non-critical error occurs, the pipeline may still be used.
- // However, the error should be reported in the pipeline status
- // so we can debug potential issues.
- NonCriticalErr error
-}
-
-// The base pipeline controller will delegate some methods to the parent
-// controller struct. The parent controller only needs to conform to this
-// interface and set the delegate field accordingly.
-type PipelineInitializer[PipelineType any] interface {
- // Initialize a new pipeline with the given steps.
- //
- // This method is delegated to the parent controller, when a pipeline needs
- // to be newly initialized or re-initialized to update it in the pipeline
- // map.
- InitPipeline(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[PipelineType]
-
- // Get the accepted pipeline type for this controller.
- //
- // This is used to filter pipelines when listing existing pipelines on
- // startup or when reacting to pipeline events.
- PipelineType() v1alpha1.PipelineType
-}
-
// Base controller for decision pipelines.
type BasePipelineController[PipelineType any] struct {
// Initialized pipelines by their name.
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/base_pipeline_controller_test.go
similarity index 97%
rename from internal/scheduling/lib/pipeline_controller_test.go
rename to internal/scheduling/lib/base_pipeline_controller_test.go
index e2fffe563..52bc20414 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/base_pipeline_controller_test.go
@@ -17,31 +17,6 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
)
-// Mock pipeline type for testing
-type mockPipeline struct {
- name string
-}
-
-// Mock PipelineInitializer for testing
-type mockPipelineInitializer struct {
- pipelineType v1alpha1.PipelineType
- initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[mockPipeline]
-}
-
-func (m *mockPipelineInitializer) InitPipeline(
- ctx context.Context, p v1alpha1.Pipeline,
-) PipelineInitResult[mockPipeline] {
-
- if m.initPipelineFunc != nil {
- return m.initPipelineFunc(ctx, p)
- }
- return PipelineInitResult[mockPipeline]{Pipeline: mockPipeline{name: p.Name}}
-}
-
-func (m *mockPipelineInitializer) PipelineType() v1alpha1.PipelineType {
- return m.pipelineType
-}
-
func TestBasePipelineController_InitAllPipelines(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
diff --git a/internal/scheduling/lib/base_step.go b/internal/scheduling/lib/base_step.go
new file mode 100644
index 000000000..699ccaa34
--- /dev/null
+++ b/internal/scheduling/lib/base_step.go
@@ -0,0 +1,84 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "github.com/cobaltcore-dev/cortex/pkg/conf"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Common base for all steps that provides some functionality
+// that would otherwise be duplicated across all steps.
+type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
+ // Options to pass via yaml to this step.
+ conf.JsonOpts[Opts]
+ // The activation function to use.
+ ActivationFunction
+ // The kubernetes client to use.
+ Client client.Client
+}
+
+// Init the step with the database and options.
+func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ opts := conf.NewRawOptsBytes(step.Opts.Raw)
+ if err := s.Load(opts); err != nil {
+ return err
+ }
+ if err := s.Options.Validate(); err != nil {
+ return err
+ }
+
+ s.Client = client
+ return nil
+}
+
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+ if d.Client == nil {
+ return errors.New("kubernetes client not initialized")
+ }
+ for _, objRef := range kns {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := d.Client.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return fmt.Errorf("knowledge %s not ready", objRef.Name)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
+ }
+ }
+ return nil
+}
+
+// Get a default result (no action) for the input weight keys given in the request.
+// Use this to initialize the result before applying filtering/weighing logic.
+func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
+ activations := make(map[string]float64)
+ for _, subject := range request.GetSubjects() {
+ activations[subject] = s.NoEffect()
+ }
+ stats := make(map[string]StepStatistics)
+ return &StepResult{Activations: activations, Statistics: stats}
+}
+
+// Get default statistics for the input weight keys given in the request.
+func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
+ return StepStatistics{
+ Unit: unit,
+ Subjects: make(map[string]float64, len(request.GetSubjects())),
+ }
+}
diff --git a/internal/scheduling/lib/base_step_test.go b/internal/scheduling/lib/base_step_test.go
new file mode 100644
index 000000000..5c54d3881
--- /dev/null
+++ b/internal/scheduling/lib/base_step_test.go
@@ -0,0 +1,6 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+// TODO
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
new file mode 100644
index 000000000..1f270a15b
--- /dev/null
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -0,0 +1,289 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "maps"
+ "math"
+ "slices"
+ "sort"
+ "sync"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Pipeline of scheduler steps.
+type filterWeigherPipeline[RequestType PipelineRequest] struct {
+ // The activation function to use when combining the
+ // results of the scheduler steps.
+ ActivationFunction
+ // The order in which filters are applied, by their step name.
+ filtersOrder []string
+ // The filters by their name.
+ filters map[string]Step[RequestType]
+ // The order in which weighers are applied, by their step name.
+ weighersOrder []string
+ // The weighers by their name.
+ weighers map[string]Step[RequestType]
+ // Multipliers to apply to weigher outputs.
+ weighersMultipliers map[string]float64
+ // Monitor to observe the pipeline.
+ monitor PipelineMonitor
+}
+
+// Create a new pipeline with filters and weighers contained in the configuration.
+func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
+ ctx context.Context,
+ client client.Client,
+ name string,
+ supportedFilters map[string]func() Step[RequestType],
+ confedFilters []v1alpha1.StepSpec,
+ supportedWeighers map[string]func() Step[RequestType],
+ confedWeighers []v1alpha1.StepSpec,
+ monitor PipelineMonitor,
+) PipelineInitResult[Pipeline[RequestType]] {
+
+ pipelineMonitor := monitor.SubPipeline(name)
+
+ // Ensure there are no overlaps between filter and weigher names.
+ for filterName := range supportedFilters {
+ if _, ok := supportedWeighers[filterName]; ok {
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("step name overlap between filters and weighers: " + filterName),
+ }
+ }
+ }
+
+ // Load all filters from the configuration.
+ filtersByName := make(map[string]Step[RequestType], len(confedFilters))
+ filtersOrder := []string{}
+ for _, filterConfig := range confedFilters {
+ slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
+ slog.Info("supported:", "filters", maps.Keys(supportedFilters))
+ makeFilter, ok := supportedFilters[filterConfig.Name]
+ if !ok {
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("unsupported filter name: " + filterConfig.Name),
+ }
+ }
+ filter := makeFilter()
+ filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
+ if err := filter.Init(ctx, client, filterConfig); err != nil {
+ return PipelineInitResult[Pipeline[RequestType]]{
+ CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
+ }
+ }
+ filtersByName[filterConfig.Name] = filter
+ filtersOrder = append(filtersOrder, filterConfig.Name)
+ slog.Info("scheduler: added filter", "name", filterConfig.Name)
+ }
+
+ // Load all weighers from the configuration.
+ weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
+ weighersMultipliers := make(map[string]float64, len(confedWeighers))
+ weighersOrder := []string{}
+ var nonCriticalErr error
+ for _, weigherConfig := range confedWeighers {
+ slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
+ slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
+ makeWeigher, ok := supportedWeighers[weigherConfig.Name]
+ if !ok {
+ nonCriticalErr = errors.New("unsupported weigher name: " + weigherConfig.Name)
+ continue // Weighers are optional.
+ }
+ weigher := makeWeigher()
+ // Validate that the weigher doesn't unexpectedly filter out hosts.
+ weigher = validateWeigher(weigher)
+ weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
+ if err := weigher.Init(ctx, client, weigherConfig); err != nil {
+ nonCriticalErr = errors.New("failed to initialize weigher: " + err.Error())
+ continue // Weighers are optional.
+ }
+ weighersByName[weigherConfig.Name] = weigher
+ weighersOrder = append(weighersOrder, weigherConfig.Name)
+ weighersMultipliers[weigherConfig.Name] = weigherConfig.Multiplier
+ slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
+ }
+
+ return PipelineInitResult[Pipeline[RequestType]]{
+ NonCriticalErr: nonCriticalErr,
+ Pipeline: &filterWeigherPipeline[RequestType]{
+ filtersOrder: filtersOrder,
+ filters: filtersByName,
+ weighersOrder: weighersOrder,
+ weighers: weighersByName,
+ weighersMultipliers: weighersMultipliers,
+ monitor: pipelineMonitor,
+ },
+ }
+}
+
+// Execute all filters in their configured order.
+// During this process, the request is mutated to only include the
+// remaining subjects; any filter activations are not retained.
+func (p *filterWeigherPipeline[RequestType]) runFilters(
+ log *slog.Logger,
+ request RequestType,
+) (filteredRequest RequestType) {
+
+ filteredRequest = request
+ for _, filterName := range p.filtersOrder {
+ filter := p.filters[filterName]
+ stepLog := log.With("filter", filterName)
+ stepLog.Info("scheduler: running filter")
+ result, err := filter.Run(stepLog, filteredRequest)
+ if errors.Is(err, ErrStepSkipped) {
+ stepLog.Info("scheduler: filter skipped")
+ continue
+ }
+ if err != nil {
+ stepLog.Error("scheduler: failed to run filter", "error", err)
+ continue
+ }
+ stepLog.Info("scheduler: finished filter")
+ // Mutate the request to only include the remaining subjects.
+ // Assume the resulting request type is the same as the input type.
+ filteredRequest = filteredRequest.FilterSubjects(result.Activations).(RequestType)
+ }
+ return filteredRequest
+}
+
+// Execute weighers and collect their activations by step name.
+func (p *filterWeigherPipeline[RequestType]) runWeighers(
+ log *slog.Logger,
+ filteredRequest RequestType,
+) map[string]map[string]float64 {
+
+ activationsByStep := map[string]map[string]float64{}
+ // Weighers can be run in parallel as they do not modify the request.
+ var lock sync.Mutex
+ var wg sync.WaitGroup
+ for _, weigherName := range p.weighersOrder {
+ weigher := p.weighers[weigherName]
+ wg.Go(func() {
+ stepLog := log.With("weigher", weigherName)
+ stepLog.Info("scheduler: running weigher")
+ result, err := weigher.Run(stepLog, filteredRequest)
+ if errors.Is(err, ErrStepSkipped) {
+ stepLog.Info("scheduler: weigher skipped")
+ return
+ }
+ if err != nil {
+ stepLog.Error("scheduler: failed to run weigher", "error", err)
+ return
+ }
+ stepLog.Info("scheduler: finished weigher")
+ lock.Lock()
+ defer lock.Unlock()
+ activationsByStep[weigherName] = result.Activations
+ })
+ }
+ wg.Wait()
+ return activationsByStep
+}
+
+// Normalize the incoming scheduler weights into the range (-1, 1) using tanh.
+//
+// Context:
+// Openstack schedulers may give us very large (positive/negative) weights such as
+// -99,000 or 99,000 (Nova). We want to respect these values, but still adjust them
+// to a meaningful value. If the scheduler really doesn't want us to run on a subject, it
+// should run a filter instead of setting a weight.
+func (p *filterWeigherPipeline[RequestType]) normalizeInputWeights(weights map[string]float64) map[string]float64 {
+ normalizedWeights := make(map[string]float64, len(weights))
+ for subjectname, weight := range weights {
+ normalizedWeights[subjectname] = math.Tanh(weight)
+ }
+ return normalizedWeights
+}
+
+// Apply the step weights to the input weights.
+func (p *filterWeigherPipeline[RequestType]) applyWeights(
+ stepWeights map[string]map[string]float64,
+ inWeights map[string]float64,
+) map[string]float64 {
+ // Copy to avoid modifying the original weights.
+ outWeights := make(map[string]float64, len(inWeights))
+ maps.Copy(outWeights, inWeights)
+
+ // Apply all activations in the strict order defined by the configuration.
+ for _, weigherName := range p.weighersOrder {
+ weigherActivations, ok := stepWeights[weigherName]
+ if !ok {
+ // This is ok, since steps can be skipped.
+ continue
+ }
+ multiplier, ok := p.weighersMultipliers[weigherName]
+ if !ok {
+ multiplier = 1.0
+ }
+ outWeights = p.Apply(outWeights, weigherActivations, multiplier)
+ }
+ return outWeights
+}
+
+// Sort the subjects by their weights.
+func (s *filterWeigherPipeline[RequestType]) sortSubjectsByWeights(weights map[string]float64) []string {
+ // Sort the subjects (keys) by their weights.
+ subjects := slices.Collect(maps.Keys(weights))
+ sort.Slice(subjects, func(i, j int) bool {
+ return weights[subjects[i]] > weights[subjects[j]]
+ })
+ return subjects
+}
+
+// Evaluate the pipeline and return a list of subjects in order of preference.
+func (p *filterWeigherPipeline[RequestType]) Run(request RequestType) (v1alpha1.DecisionResult, error) {
+ slogArgs := request.GetTraceLogArgs()
+ slogArgsAny := make([]any, 0, len(slogArgs))
+ for _, arg := range slogArgs {
+ slogArgsAny = append(slogArgsAny, arg)
+ }
+ traceLog := slog.With(slogArgsAny...)
+
+ subjectsIn := request.GetSubjects()
+ traceLog.Info("scheduler: starting pipeline", "subjects", subjectsIn)
+
+ // Normalize the input weights so we can apply step weights meaningfully.
+ inWeights := p.normalizeInputWeights(request.GetWeights())
+ traceLog.Info("scheduler: input weights", "weights", inWeights)
+
+ // Run filters first to reduce the number of subjects.
+ // Any weights assigned to filtered out subjects are ignored.
+ filteredRequest := p.runFilters(traceLog, request)
+ traceLog.Info(
+ "scheduler: finished filters",
+ "remainingSubjects", filteredRequest.GetSubjects(),
+ )
+
+ // Run weighers on the filtered subjects.
+ remainingWeights := make(map[string]float64, len(filteredRequest.GetSubjects()))
+ for _, subject := range filteredRequest.GetSubjects() {
+ remainingWeights[subject] = inWeights[subject]
+ }
+ stepWeights := p.runWeighers(traceLog, filteredRequest)
+ outWeights := p.applyWeights(stepWeights, remainingWeights)
+ traceLog.Info("scheduler: output weights", "weights", outWeights)
+
+ subjects := p.sortSubjectsByWeights(outWeights)
+ traceLog.Info("scheduler: sorted subjects", "subjects", subjects)
+
+ // Collect some metrics about the pipeline execution.
+ go p.monitor.observePipelineResult(request, subjects)
+
+ result := v1alpha1.DecisionResult{
+ RawInWeights: request.GetWeights(),
+ NormalizedInWeights: inWeights,
+ AggregatedOutWeights: outWeights,
+ OrderedHosts: subjects,
+ }
+ if len(subjects) > 0 {
+ result.TargetHost = &subjects[0]
+ }
+ return result, nil
+}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
new file mode 100644
index 000000000..3322c487f
--- /dev/null
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -0,0 +1,231 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+ "math"
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type mockFilter struct {
+ err error
+ name string
+}
+
+func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ return nil
+}
+
+func (m *mockFilter) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ if m.err != nil {
+ return nil, m.err
+ }
+ return &StepResult{
+ Activations: map[string]float64{"host1": 0.0, "host2": 0.0},
+ }, nil
+}
+
+type mockWeigher struct {
+ err error
+ name string
+}
+
+func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+ return nil
+}
+
+func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ if m.err != nil {
+ return nil, m.err
+ }
+ return &StepResult{
+ Activations: map[string]float64{"host1": 0.0, "host2": 1.0},
+ }, nil
+}
+
+func TestPipeline_Run(t *testing.T) {
+ // Create an instance of the pipeline with a mock step
+ pipeline := &filterWeigherPipeline[mockPipelineRequest]{
+ filters: map[string]Step[mockPipelineRequest]{
+ "mock_filter": &mockFilter{
+ name: "mock_filter",
+ },
+ },
+ filtersOrder: []string{"mock_filter"},
+ weighers: map[string]Step[mockPipelineRequest]{
+ "mock_weigher": &mockWeigher{
+ name: "mock_weigher",
+ },
+ },
+ weighersOrder: []string{"mock_weigher"},
+ }
+
+ tests := []struct {
+ name string
+ request mockPipelineRequest
+ expectedResult []string
+ }{
+ {
+ name: "Single step pipeline",
+ request: mockPipelineRequest{
+ Subjects: []string{"host1", "host2", "host3"},
+ Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
+ },
+ expectedResult: []string{"host2", "host1"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := pipeline.Run(tt.request)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ if len(result.OrderedHosts) != len(tt.expectedResult) {
+ t.Fatalf("expected %d results, got %d", len(tt.expectedResult), len(result.OrderedHosts))
+ }
+ for i, host := range tt.expectedResult {
+ if result.OrderedHosts[i] != host {
+ t.Errorf("expected host %s at position %d, got %s", host, i, result.OrderedHosts[i])
+ }
+ }
+ })
+ }
+}
+
+func TestPipeline_NormalizeNovaWeights(t *testing.T) {
+ p := &filterWeigherPipeline[mockPipelineRequest]{}
+
+ tests := []struct {
+ name string
+ weights map[string]float64
+ expected map[string]float64
+ }{
+ {
+ name: "Normalize weights",
+ weights: map[string]float64{
+ "host1": 1000.0,
+ "host2": -1000.0,
+ "host3": 0.0,
+ },
+ expected: map[string]float64{
+ "host1": 1.0,
+ "host2": -1.0,
+ "host3": 0.0,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := p.normalizeInputWeights(tt.weights)
+ for host, weight := range tt.expected {
+ if result[host] != weight {
+ t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host])
+ }
+ }
+ })
+ }
+}
+
+func TestPipeline_ApplyStepWeights(t *testing.T) {
+ p := &filterWeigherPipeline[mockPipelineRequest]{
+ weighers: map[string]Step[mockPipelineRequest]{},
+ weighersOrder: []string{"step1", "step2"},
+ }
+
+ tests := []struct {
+ name string
+ stepWeights map[string]map[string]float64
+ inWeights map[string]float64
+ expectedResult map[string]float64
+ }{
+ {
+ name: "Apply step weights",
+ stepWeights: map[string]map[string]float64{
+ "step1": {"host1": 0.5, "host2": 0.2},
+ "step2": {"host1": 0.3, "host2": 0.4},
+ },
+ inWeights: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ },
+ expectedResult: map[string]float64{
+ "host1": 1.0 + math.Tanh(0.5) + math.Tanh(0.3),
+ "host2": 1.0 + math.Tanh(0.2) + math.Tanh(0.4),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := p.applyWeights(tt.stepWeights, tt.inWeights)
+ for host, weight := range tt.expectedResult {
+ if result[host] != weight {
+ t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host])
+ }
+ }
+ })
+ }
+}
+
+func TestPipeline_SortHostsByWeights(t *testing.T) {
+ p := &filterWeigherPipeline[mockPipelineRequest]{}
+
+ tests := []struct {
+ name string
+ weights map[string]float64
+ expected []string
+ }{
+ {
+ name: "Sort hosts by weights",
+ weights: map[string]float64{
+ "host1": 0.5,
+ "host2": 1.0,
+ "host3": 0.2,
+ },
+ expected: []string{"host2", "host1", "host3"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := p.sortSubjectsByWeights(tt.weights)
+ for i, host := range tt.expected {
+ if result[i] != host {
+ t.Errorf("expected host %s at position %d, got %s", host, i, result[i])
+ }
+ }
+ })
+ }
+}
+
+func TestPipeline_RunFilters(t *testing.T) {
+ mockStep := &mockFilter{
+ name: "mock_filter",
+ }
+ p := &filterWeigherPipeline[mockPipelineRequest]{
+ filtersOrder: []string{
+ "mock_filter",
+ },
+ filters: map[string]Step[mockPipelineRequest]{
+ "mock_filter": mockStep,
+ },
+ }
+
+ request := mockPipelineRequest{
+ Subjects: []string{"host1", "host2"},
+ Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
+ }
+
+ req := p.runFilters(slog.Default(), request)
+ if len(req.Subjects) != 2 {
+ t.Fatalf("expected 2 step results, got %d", len(req.Subjects))
+ }
+}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
index 103a1f951..e6d3d6b86 100644
--- a/internal/scheduling/lib/pipeline.go
+++ b/internal/scheduling/lib/pipeline.go
@@ -4,291 +4,10 @@
package lib
import (
- "context"
- "errors"
- "log/slog"
- "maps"
- "math"
- "slices"
- "sort"
- "sync"
-
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
type Pipeline[RequestType PipelineRequest] interface {
// Run the scheduling pipeline with the given request.
Run(request RequestType) (v1alpha1.DecisionResult, error)
}
-
-// Pipeline of scheduler steps.
-type pipeline[RequestType PipelineRequest] struct {
- // The activation function to use when combining the
- // results of the scheduler steps.
- ActivationFunction
- // The order in which filters are applied, by their step name.
- filtersOrder []string
- // The filters by their name.
- filters map[string]Step[RequestType]
- // The order in which weighers are applied, by their step name.
- weighersOrder []string
- // The weighers by their name.
- weighers map[string]Step[RequestType]
- // Multipliers to apply to weigher outputs.
- weighersMultipliers map[string]float64
- // Monitor to observe the pipeline.
- monitor PipelineMonitor
-}
-
-// Create a new pipeline with filters and weighers contained in the configuration.
-func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
- ctx context.Context,
- client client.Client,
- name string,
- supportedFilters map[string]func() Step[RequestType],
- confedFilters []v1alpha1.StepSpec,
- supportedWeighers map[string]func() Step[RequestType],
- confedWeighers []v1alpha1.StepSpec,
- monitor PipelineMonitor,
-) PipelineInitResult[Pipeline[RequestType]] {
-
- pipelineMonitor := monitor.SubPipeline(name)
-
- // Ensure there are no overlaps between filter and weigher names.
- for filterName := range supportedFilters {
- if _, ok := supportedWeighers[filterName]; ok {
- return PipelineInitResult[Pipeline[RequestType]]{
- CriticalErr: errors.New("step name overlap between filters and weighers: " + filterName),
- }
- }
- }
-
- // Load all filters from the configuration.
- filtersByName := make(map[string]Step[RequestType], len(confedFilters))
- filtersOrder := []string{}
- for _, filterConfig := range confedFilters {
- slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
- slog.Info("supported:", "filters", maps.Keys(supportedFilters))
- makeFilter, ok := supportedFilters[filterConfig.Name]
- if !ok {
- return PipelineInitResult[Pipeline[RequestType]]{
- CriticalErr: errors.New("unsupported filter name: " + filterConfig.Name),
- }
- }
- filter := makeFilter()
- filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
- if err := filter.Init(ctx, client, filterConfig); err != nil {
- return PipelineInitResult[Pipeline[RequestType]]{
- CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
- }
- }
- filtersByName[filterConfig.Name] = filter
- filtersOrder = append(filtersOrder, filterConfig.Name)
- slog.Info("scheduler: added filter", "name", filterConfig.Name)
- }
-
- // Load all weighers from the configuration.
- weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
- weighersMultipliers := make(map[string]float64, len(confedWeighers))
- weighersOrder := []string{}
- var nonCriticalErr error
- for _, weigherConfig := range confedWeighers {
- slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
- slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
- makeWeigher, ok := supportedWeighers[weigherConfig.Name]
- if !ok {
- nonCriticalErr = errors.New("unsupported weigher name: " + weigherConfig.Name)
- continue // Weighers are optional.
- }
- weigher := makeWeigher()
- // Validate that the weigher doesn't unexpectedly filter out hosts.
- weigher = validateWeigher(weigher)
- weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
- if err := weigher.Init(ctx, client, weigherConfig); err != nil {
- nonCriticalErr = errors.New("failed to initialize weigher: " + err.Error())
- continue // Weighers are optional.
- }
- weighersByName[weigherConfig.Name] = weigher
- weighersOrder = append(weighersOrder, weigherConfig.Name)
- weighersMultipliers[weigherConfig.Name] = weigherConfig.Multiplier
- slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
- }
-
- return PipelineInitResult[Pipeline[RequestType]]{
- NonCriticalErr: nonCriticalErr,
- Pipeline: &pipeline[RequestType]{
- filtersOrder: filtersOrder,
- filters: filtersByName,
- weighersOrder: weighersOrder,
- weighers: weighersByName,
- weighersMultipliers: weighersMultipliers,
- monitor: pipelineMonitor,
- },
- }
-}
-
-// Execute filters and collect their activations by step name.
-// During this process, the request is mutated to only include the
-// remaining subjects.
-func (p *pipeline[RequestType]) runFilters(
- log *slog.Logger,
- request RequestType,
-) (filteredRequest RequestType) {
-
- filteredRequest = request
- for _, filterName := range p.filtersOrder {
- filter := p.filters[filterName]
- stepLog := log.With("filter", filterName)
- stepLog.Info("scheduler: running filter")
- result, err := filter.Run(stepLog, filteredRequest)
- if errors.Is(err, ErrStepSkipped) {
- stepLog.Info("scheduler: filter skipped")
- continue
- }
- if err != nil {
- stepLog.Error("scheduler: failed to run filter", "error", err)
- continue
- }
- stepLog.Info("scheduler: finished filter")
- // Mutate the request to only include the remaining subjects.
- // Assume the resulting request type is the same as the input type.
- filteredRequest = filteredRequest.FilterSubjects(result.Activations).(RequestType)
- }
- return filteredRequest
-}
-
-// Execute weighers and collect their activations by step name.
-func (p *pipeline[RequestType]) runWeighers(
- log *slog.Logger,
- filteredRequest RequestType,
-) map[string]map[string]float64 {
-
- activationsByStep := map[string]map[string]float64{}
- // Weighers can be run in parallel as they do not modify the request.
- var lock sync.Mutex
- var wg sync.WaitGroup
- for _, weigherName := range p.weighersOrder {
- weigher := p.weighers[weigherName]
- wg.Go(func() {
- stepLog := log.With("weigher", weigherName)
- stepLog.Info("scheduler: running weigher")
- result, err := weigher.Run(stepLog, filteredRequest)
- if errors.Is(err, ErrStepSkipped) {
- stepLog.Info("scheduler: weigher skipped")
- return
- }
- if err != nil {
- stepLog.Error("scheduler: failed to run weigher", "error", err)
- return
- }
- stepLog.Info("scheduler: finished weigher")
- lock.Lock()
- defer lock.Unlock()
- activationsByStep[weigherName] = result.Activations
- })
- }
- wg.Wait()
- return activationsByStep
-}
-
-// Apply an initial weight to the subjects.
-//
-// Context:
-// Openstack schedulers may give us very large (positive/negative) weights such as
-// -99,000 or 99,000 (Nova). We want to respect these values, but still adjust them
-// to a meaningful value. If the scheduler really doesn't want us to run on a subject, it
-// should run a filter instead of setting a weight.
-func (p *pipeline[RequestType]) normalizeInputWeights(weights map[string]float64) map[string]float64 {
- normalizedWeights := make(map[string]float64, len(weights))
- for subjectname, weight := range weights {
- normalizedWeights[subjectname] = math.Tanh(weight)
- }
- return normalizedWeights
-}
-
-// Apply the step weights to the input weights.
-func (p *pipeline[RequestType]) applyWeights(
- stepWeights map[string]map[string]float64,
- inWeights map[string]float64,
-) map[string]float64 {
- // Copy to avoid modifying the original weights.
- outWeights := make(map[string]float64, len(inWeights))
- maps.Copy(outWeights, inWeights)
-
- // Apply all activations in the strict order defined by the configuration.
- for _, weigherName := range p.weighersOrder {
- weigherActivations, ok := stepWeights[weigherName]
- if !ok {
- // This is ok, since steps can be skipped.
- continue
- }
- multiplier, ok := p.weighersMultipliers[weigherName]
- if !ok {
- multiplier = 1.0
- }
- outWeights = p.Apply(outWeights, weigherActivations, multiplier)
- }
- return outWeights
-}
-
-// Sort the subjects by their weights.
-func (s *pipeline[RequestType]) sortSubjectsByWeights(weights map[string]float64) []string {
- // Sort the subjects (keys) by their weights.
- subjects := slices.Collect(maps.Keys(weights))
- sort.Slice(subjects, func(i, j int) bool {
- return weights[subjects[i]] > weights[subjects[j]]
- })
- return subjects
-}
-
-// Evaluate the pipeline and return a list of subjects in order of preference.
-func (p *pipeline[RequestType]) Run(request RequestType) (v1alpha1.DecisionResult, error) {
- slogArgs := request.GetTraceLogArgs()
- slogArgsAny := make([]any, 0, len(slogArgs))
- for _, arg := range slogArgs {
- slogArgsAny = append(slogArgsAny, arg)
- }
- traceLog := slog.With(slogArgsAny...)
-
- subjectsIn := request.GetSubjects()
- traceLog.Info("scheduler: starting pipeline", "subjects", subjectsIn)
-
- // Normalize the input weights so we can apply step weights meaningfully.
- inWeights := p.normalizeInputWeights(request.GetWeights())
- traceLog.Info("scheduler: input weights", "weights", inWeights)
-
- // Run filters first to reduce the number of subjects.
- // Any weights assigned to filtered out subjects are ignored.
- filteredRequest := p.runFilters(traceLog, request)
- traceLog.Info(
- "scheduler: finished filters",
- "remainingSubjects", filteredRequest.GetSubjects(),
- )
-
- // Run weighers on the filtered subjects.
- remainingWeights := make(map[string]float64, len(filteredRequest.GetSubjects()))
- for _, subject := range filteredRequest.GetSubjects() {
- remainingWeights[subject] = inWeights[subject]
- }
- stepWeights := p.runWeighers(traceLog, filteredRequest)
- outWeights := p.applyWeights(stepWeights, remainingWeights)
- traceLog.Info("scheduler: output weights", "weights", outWeights)
-
- subjects := p.sortSubjectsByWeights(outWeights)
- traceLog.Info("scheduler: sorted subjects", "subjects", subjects)
-
- // Collect some metrics about the pipeline execution.
- go p.monitor.observePipelineResult(request, subjects)
-
- result := v1alpha1.DecisionResult{
- RawInWeights: request.GetWeights(),
- NormalizedInWeights: inWeights,
- AggregatedOutWeights: outWeights,
- OrderedHosts: subjects,
- }
- if len(subjects) > 0 {
- result.TargetHost = &subjects[0]
- }
- return result, nil
-}
diff --git a/internal/scheduling/lib/pipeline_initializer.go b/internal/scheduling/lib/pipeline_initializer.go
new file mode 100644
index 000000000..1b0ca6f3b
--- /dev/null
+++ b/internal/scheduling/lib/pipeline_initializer.go
@@ -0,0 +1,44 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+)
+
+// Result returned by the InitPipeline interface method.
+type PipelineInitResult[PipelineType any] struct {
+ // The pipeline, if successfully created.
+ Pipeline PipelineType
+
+ // A critical error that prevented the pipeline from being initialized.
+ // If a critical error occurs, the pipeline should not be used.
+ CriticalErr error
+
+ // A non-critical error that occurred during initialization.
+ // If a non-critical error occurs, the pipeline may still be used.
+ // However, the error should be reported in the pipeline status
+ // so we can debug potential issues.
+ NonCriticalErr error
+}
+
+// The base pipeline controller will delegate some methods to the parent
+// controller struct. The parent controller only needs to conform to this
+// interface and set the delegate field accordingly.
+type PipelineInitializer[PipelineType any] interface {
+ // Initialize a new pipeline with the given steps.
+ //
+	// This method is delegated to the parent controller when a pipeline needs
+ // to be newly initialized or re-initialized to update it in the pipeline
+ // map.
+ InitPipeline(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[PipelineType]
+
+ // Get the accepted pipeline type for this controller.
+ //
+ // This is used to filter pipelines when listing existing pipelines on
+ // startup or when reacting to pipeline events.
+ PipelineType() v1alpha1.PipelineType
+}
diff --git a/internal/scheduling/lib/pipeline_initializer_test.go b/internal/scheduling/lib/pipeline_initializer_test.go
new file mode 100644
index 000000000..f7764a245
--- /dev/null
+++ b/internal/scheduling/lib/pipeline_initializer_test.go
@@ -0,0 +1,30 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+)
+
+// Mock PipelineInitializer for testing
+type mockPipelineInitializer struct {
+ pipelineType v1alpha1.PipelineType
+ initPipelineFunc func(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[mockPipeline]
+}
+
+func (m *mockPipelineInitializer) InitPipeline(
+ ctx context.Context, p v1alpha1.Pipeline,
+) PipelineInitResult[mockPipeline] {
+
+ if m.initPipelineFunc != nil {
+ return m.initPipelineFunc(ctx, p)
+ }
+ return PipelineInitResult[mockPipeline]{Pipeline: mockPipeline{name: p.Name}}
+}
+
+func (m *mockPipelineInitializer) PipelineType() v1alpha1.PipelineType {
+ return m.pipelineType
+}
diff --git a/internal/scheduling/lib/request.go b/internal/scheduling/lib/pipeline_request.go
similarity index 100%
rename from internal/scheduling/lib/request.go
rename to internal/scheduling/lib/pipeline_request.go
diff --git a/internal/scheduling/lib/request_test.go b/internal/scheduling/lib/pipeline_request_test.go
similarity index 100%
rename from internal/scheduling/lib/request_test.go
rename to internal/scheduling/lib/pipeline_request_test.go
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
index f32c5377b..c5360bc88 100644
--- a/internal/scheduling/lib/pipeline_test.go
+++ b/internal/scheduling/lib/pipeline_test.go
@@ -3,229 +3,7 @@
package lib
-import (
- "context"
- "log/slog"
- "math"
- "testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-type mockFilter struct {
- err error
- name string
-}
-
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- return nil
-}
-
-func (m *mockFilter) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- if m.err != nil {
- return nil, m.err
- }
- return &StepResult{
- Activations: map[string]float64{"host1": 0.0, "host2": 0.0},
- }, nil
-}
-
-type mockWeigher struct {
- err error
+// Mock pipeline type for testing
+type mockPipeline struct {
name string
}
-
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- return nil
-}
-
-func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- if m.err != nil {
- return nil, m.err
- }
- return &StepResult{
- Activations: map[string]float64{"host1": 0.0, "host2": 1.0},
- }, nil
-}
-
-func TestPipeline_Run(t *testing.T) {
- // Create an instance of the pipeline with a mock step
- pipeline := &pipeline[mockPipelineRequest]{
- filters: map[string]Step[mockPipelineRequest]{
- "mock_filter": &mockFilter{
- name: "mock_filter",
- },
- },
- filtersOrder: []string{"mock_filter"},
- weighers: map[string]Step[mockPipelineRequest]{
- "mock_weigher": &mockWeigher{
- name: "mock_weigher",
- },
- },
- weighersOrder: []string{"mock_weigher"},
- }
-
- tests := []struct {
- name string
- request mockPipelineRequest
- expectedResult []string
- }{
- {
- name: "Single step pipeline",
- request: mockPipelineRequest{
- Subjects: []string{"host1", "host2", "host3"},
- Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
- },
- expectedResult: []string{"host2", "host1"},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result, err := pipeline.Run(tt.request)
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
- if len(result.OrderedHosts) != len(tt.expectedResult) {
- t.Fatalf("expected %d results, got %d", len(tt.expectedResult), len(result.OrderedHosts))
- }
- for i, host := range tt.expectedResult {
- if result.OrderedHosts[i] != host {
- t.Errorf("expected host %s at position %d, got %s", host, i, result.OrderedHosts[i])
- }
- }
- })
- }
-}
-
-func TestPipeline_NormalizeNovaWeights(t *testing.T) {
- p := &pipeline[mockPipelineRequest]{}
-
- tests := []struct {
- name string
- weights map[string]float64
- expected map[string]float64
- }{
- {
- name: "Normalize weights",
- weights: map[string]float64{
- "host1": 1000.0,
- "host2": -1000.0,
- "host3": 0.0,
- },
- expected: map[string]float64{
- "host1": 1.0,
- "host2": -1.0,
- "host3": 0.0,
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result := p.normalizeInputWeights(tt.weights)
- for host, weight := range tt.expected {
- if result[host] != weight {
- t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host])
- }
- }
- })
- }
-}
-
-func TestPipeline_ApplyStepWeights(t *testing.T) {
- p := &pipeline[mockPipelineRequest]{
- weighers: map[string]Step[mockPipelineRequest]{},
- weighersOrder: []string{"step1", "step2"},
- }
-
- tests := []struct {
- name string
- stepWeights map[string]map[string]float64
- inWeights map[string]float64
- expectedResult map[string]float64
- }{
- {
- name: "Apply step weights",
- stepWeights: map[string]map[string]float64{
- "step1": {"host1": 0.5, "host2": 0.2},
- "step2": {"host1": 0.3, "host2": 0.4},
- },
- inWeights: map[string]float64{
- "host1": 1.0,
- "host2": 1.0,
- },
- expectedResult: map[string]float64{
- "host1": 1.0 + math.Tanh(0.5) + math.Tanh(0.3),
- "host2": 1.0 + math.Tanh(0.2) + math.Tanh(0.4),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result := p.applyWeights(tt.stepWeights, tt.inWeights)
- for host, weight := range tt.expectedResult {
- if result[host] != weight {
- t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host])
- }
- }
- })
- }
-}
-
-func TestPipeline_SortHostsByWeights(t *testing.T) {
- p := &pipeline[mockPipelineRequest]{}
-
- tests := []struct {
- name string
- weights map[string]float64
- expected []string
- }{
- {
- name: "Sort hosts by weights",
- weights: map[string]float64{
- "host1": 0.5,
- "host2": 1.0,
- "host3": 0.2,
- },
- expected: []string{"host2", "host1", "host3"},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result := p.sortSubjectsByWeights(tt.weights)
- for i, host := range tt.expected {
- if result[i] != host {
- t.Errorf("expected host %s at position %d, got %s", host, i, result[i])
- }
- }
- })
- }
-}
-
-func TestPipeline_RunFilters(t *testing.T) {
- mockStep := &mockFilter{
- name: "mock_filter",
- }
- p := &pipeline[mockPipelineRequest]{
- filtersOrder: []string{
- "mock_filter",
- },
- filters: map[string]Step[mockPipelineRequest]{
- "mock_filter": mockStep,
- },
- }
-
- request := mockPipelineRequest{
- Subjects: []string{"host1", "host2"},
- Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
- }
-
- req := p.runFilters(slog.Default(), request)
- if len(req.Subjects) != 2 {
- t.Fatalf("expected 2 step results, got %d", len(req.Subjects))
- }
-}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 99012a2f5..fa6c055ff 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -6,13 +6,9 @@ package lib
import (
"context"
"errors"
- "fmt"
"log/slog"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/pkg/conf"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -21,17 +17,6 @@ var (
ErrStepSkipped = errors.New("step skipped")
)
-// Interface to which step options must conform.
-type StepOpts interface {
- // Validate the options for this step.
- Validate() error
-}
-
-// Empty step opts conforming to the StepOpts interface (validation always succeeds).
-type EmptyStepOpts struct{}
-
-func (EmptyStepOpts) Validate() error { return nil }
-
// Interface for a scheduler step.
type Step[RequestType PipelineRequest] interface {
// Configure the step and initialize things like a database connection.
@@ -54,71 +39,3 @@ type Step[RequestType PipelineRequest] interface {
// be used to log the step's execution.
Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-
-// Common base for all steps that provides some functionality
-// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
- // Options to pass via yaml to this step.
- conf.JsonOpts[Opts]
- // The activation function to use.
- ActivationFunction
- // The kubernetes client to use.
- Client client.Client
-}
-
-// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- opts := conf.NewRawOptsBytes(step.Opts.Raw)
- if err := s.Load(opts); err != nil {
- return err
- }
- if err := s.Options.Validate(); err != nil {
- return err
- }
-
- s.Client = client
- return nil
-}
-
-// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
- if d.Client == nil {
- return errors.New("kubernetes client not initialized")
- }
- for _, objRef := range kns {
- knowledge := &v1alpha1.Knowledge{}
- if err := d.Client.Get(ctx, client.ObjectKey{
- Name: objRef.Name,
- Namespace: objRef.Namespace,
- }, knowledge); err != nil {
- return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return fmt.Errorf("knowledge %s not ready", objRef.Name)
- }
- if knowledge.Status.RawLength == 0 {
- return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
- }
- }
- return nil
-}
-
-// Get a default result (no action) for the input weight keys given in the request.
-// Use this to initialize the result before applying filtering/weighing logic.
-func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
- activations := make(map[string]float64)
- for _, subject := range request.GetSubjects() {
- activations[subject] = s.NoEffect()
- }
- stats := make(map[string]StepStatistics)
- return &StepResult{Activations: activations, Statistics: stats}
-}
-
-// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
- return StepStatistics{
- Unit: unit,
- Subjects: make(map[string]float64, len(request.GetSubjects())),
- }
-}
diff --git a/internal/scheduling/lib/step_opts.go b/internal/scheduling/lib/step_opts.go
new file mode 100644
index 000000000..3ffa7b44d
--- /dev/null
+++ b/internal/scheduling/lib/step_opts.go
@@ -0,0 +1,15 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+// Interface to which step options must conform.
+type StepOpts interface {
+ // Validate the options for this step.
+ Validate() error
+}
+
+// Empty step opts conforming to the StepOpts interface (validation always succeeds).
+type EmptyStepOpts struct{}
+
+func (EmptyStepOpts) Validate() error { return nil }
diff --git a/internal/scheduling/lib/step_opts_test.go b/internal/scheduling/lib/step_opts_test.go
new file mode 100644
index 000000000..ad2b595ea
--- /dev/null
+++ b/internal/scheduling/lib/step_opts_test.go
@@ -0,0 +1,13 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+type MockOptions struct {
+ Option1 string `json:"option1"`
+ Option2 int `json:"option2"`
+}
+
+func (o MockOptions) Validate() error {
+ return nil
+}
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/step_test.go
index 31d335cd3..2e5e899e7 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/step_test.go
@@ -22,12 +22,3 @@ func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client,
func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
-
-type MockOptions struct {
- Option1 string `json:"option1"`
- Option2 int `json:"option2"`
-}
-
-func (o MockOptions) Validate() error {
- return nil
-}
From 8db7422a81303f19cf50c2c9c2dfa5270b635665 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:04:58 +0100
Subject: [PATCH 18/41] Multiplier should be nilable
---
api/v1alpha1/pipeline_types.go | 3 +--
api/v1alpha1/zz_generated.deepcopy.go | 5 +++++
config/crd/bases/cortex.cloud_pipelines.yaml | 3 ---
config/crd/cortex.cloud_pipelines.yaml | 3 ---
dist/chart/templates/crd/cortex.cloud_pipelines.yaml | 3 ---
internal/scheduling/lib/filter_weigher_pipeline.go | 6 +++++-
6 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 9c88572ec..12a5108c0 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -26,8 +26,7 @@ type StepSpec struct {
// This can be used to increase or decrease the weight of a step
// relative to other steps in the same pipeline.
// +kubebuilder:validation:Optional
- // +kubebuilder:default=1.0
- Multiplier float64 `json:"multiplier,omitempty"`
+ Multiplier *float64 `json:"multiplier,omitempty"`
}
type PipelineType string
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index ae02b8da2..3bbf4e967 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1101,6 +1101,11 @@ func (in *StepResult) DeepCopy() *StepResult {
func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
in.Opts.DeepCopyInto(&out.Opts)
+ if in.Multiplier != nil {
+ in, out := &in.Multiplier, &out.Multiplier
+ *out = new(float64)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index eced3b0d3..55ea0fec1 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -80,7 +80,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -115,7 +114,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -167,7 +165,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index eced3b0d3..55ea0fec1 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -80,7 +80,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -115,7 +114,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -167,7 +165,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index a9c2eda84..2de6eb87a 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -86,7 +86,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -121,7 +120,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
@@ -173,7 +171,6 @@ spec:
and decisions made by it.
type: string
multiplier:
- default: 1
description: |-
Optional multiplier to apply to the step's output.
This can be used to increase or decrease the weight of a step
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
index 1f270a15b..07adbac31 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -106,7 +106,11 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
- weighersMultipliers[weigherConfig.Name] = weigherConfig.Multiplier
+ if weigherConfig.Multiplier == nil {
+ weighersMultipliers[weigherConfig.Name] = 1.0
+ } else {
+ weighersMultipliers[weigherConfig.Name] = *weigherConfig.Multiplier
+ }
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
}
From a5ff24420fde70c4184b93e128e1974a7d393507 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:10:37 +0100
Subject: [PATCH 19/41] Rename opts to params
---
api/v1alpha1/pipeline_types.go | 2 +-
api/v1alpha1/zz_generated.deepcopy.go | 2 +-
config/crd/bases/cortex.cloud_pipelines.yaml | 6 +++---
config/crd/cortex.cloud_pipelines.yaml | 6 +++---
dist/chart/templates/crd/cortex.cloud_pipelines.yaml | 6 +++---
.../scheduling/decisions/manila/pipeline_controller_test.go | 2 +-
.../scheduling/decisions/nova/pipeline_controller_test.go | 4 ++--
internal/scheduling/descheduling/nova/plugins/base.go | 2 +-
internal/scheduling/descheduling/nova/plugins/base_test.go | 2 +-
internal/scheduling/lib/base_step.go | 2 +-
10 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 12a5108c0..67835c4ed 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -15,7 +15,7 @@ type StepSpec struct {
// Additional configuration for the step that can be used
// +kubebuilder:validation:Optional
- Opts runtime.RawExtension `json:"opts,omitempty"`
+ Params runtime.RawExtension `json:"params,omitempty"`
// Additional description of the step which helps understand its purpose
// and decisions made by it.
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 3bbf4e967..7fba343ac 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1100,7 +1100,7 @@ func (in *StepResult) DeepCopy() *StepResult {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepSpec) DeepCopyInto(out *StepSpec) {
*out = *in
- in.Opts.DeepCopyInto(&out.Opts)
+ in.Params.DeepCopyInto(&out.Params)
if in.Multiplier != nil {
in, out := &in.Multiplier, &out.Multiplier
*out = new(float64)
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 55ea0fec1..21aabe065 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -90,7 +90,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -124,7 +124,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -175,7 +175,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 55ea0fec1..21aabe065 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -90,7 +90,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -124,7 +124,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -175,7 +175,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 2de6eb87a..6644dcd27 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -96,7 +96,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -130,7 +130,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
@@ -181,7 +181,7 @@ spec:
The name of the scheduler step in the cortex implementation.
Must match to a step implemented by the pipeline controller.
type: string
- opts:
+ params:
description: Additional configuration for the step that can
be used
type: object
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index f9e3bffdb..68c6b9c89 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -493,7 +493,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
weighers: []v1alpha1.StepSpec{
{
Name: "netapp_cpu_usage_balancing",
- Opts: runtime.RawExtension{
+ Params: runtime.RawExtension{
Raw: []byte(`{"AvgCPUUsageLowerBound": 0, "AvgCPUUsageUpperBound": 90, "MaxCPUUsageLowerBound": 0, "MaxCPUUsageUpperBound": 100}`),
},
},
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index ee1c456b4..59c099623 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -301,7 +301,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
+ Params: runtime.RawExtension{
Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`),
},
},
@@ -314,7 +314,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
filters: []v1alpha1.StepSpec{
{
Name: "filter_status_conditions",
- Opts: runtime.RawExtension{
+ Params: runtime.RawExtension{
Raw: []byte(`invalid json`),
},
},
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index 14c0027d2..055b5b644 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -28,7 +28,7 @@ type Detector[Opts any] struct {
func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
d.Client = client
- opts := conf.NewRawOptsBytes(step.Opts.Raw)
+ opts := conf.NewRawOptsBytes(step.Params.Raw)
if err := d.Load(opts); err != nil {
return err
}
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index 524c69547..36d988850 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -24,7 +24,7 @@ func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
- Opts: runtime.RawExtension{Raw: []byte(`{
+ Params: runtime.RawExtension{Raw: []byte(`{
"option1": "value1",
"option2": 2
}`)},
diff --git a/internal/scheduling/lib/base_step.go b/internal/scheduling/lib/base_step.go
index 699ccaa34..c58a5b8bc 100644
--- a/internal/scheduling/lib/base_step.go
+++ b/internal/scheduling/lib/base_step.go
@@ -28,7 +28,7 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
// Init the step with the database and options.
func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- opts := conf.NewRawOptsBytes(step.Opts.Raw)
+ opts := conf.NewRawOptsBytes(step.Params.Raw)
if err := s.Load(opts); err != nil {
return err
}
From ca17468eb258c5cfc6a5378f18b3910147ce7044 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:24:59 +0100
Subject: [PATCH 20/41] Adjust pipeline yaml specs
---
.../cortex-cinder/templates/pipelines.yaml | 3 +-
.../cortex-ironcore/templates/pipelines.yaml | 8 +--
.../cortex-manila/templates/pipelines.yaml | 9 ++-
.../cortex-nova/templates/pipelines.yaml | 37 +++--------
.../cortex-nova/templates/pipelines_kvm.yaml | 66 +++++++------------
.../cortex-pods/templates/pipelines.yaml | 18 ++---
6 files changed, 48 insertions(+), 93 deletions(-)
diff --git a/helm/bundles/cortex-cinder/templates/pipelines.yaml b/helm/bundles/cortex-cinder/templates/pipelines.yaml
index 1e876cff0..72c47b019 100644
--- a/helm/bundles/cortex-cinder/templates/pipelines.yaml
+++ b/helm/bundles/cortex-cinder/templates/pipelines.yaml
@@ -11,4 +11,5 @@ spec:
for additional filtering and weighing via this external scheduler pipeline.
Cortex returns a ranked list of hosts back to cinder for final selection.
type: filter-weigher
- steps: []
+ filters: []
+ weighers: []
\ No newline at end of file
diff --git a/helm/bundles/cortex-ironcore/templates/pipelines.yaml b/helm/bundles/cortex-ironcore/templates/pipelines.yaml
index 99743fd03..5598e60da 100644
--- a/helm/bundles/cortex-ironcore/templates/pipelines.yaml
+++ b/helm/bundles/cortex-ironcore/templates/pipelines.yaml
@@ -9,12 +9,10 @@ spec:
This pipeline is used to schedule ironcore machines onto machinepools.
type: filter-weigher
createDecisions: true
- steps:
- - type: weigher
- name: noop
+ filters: []
+ weighers:
+ - name: noop
description: |
This is only a passthrough step which assigns a zero-weight to all machinepool
candidates. It is used as a placeholder step in the ironcore machines scheduler
pipeline.
- knowledges: []
- mandatory: false
diff --git a/helm/bundles/cortex-manila/templates/pipelines.yaml b/helm/bundles/cortex-manila/templates/pipelines.yaml
index 8aa32cefb..80f4999d3 100644
--- a/helm/bundles/cortex-manila/templates/pipelines.yaml
+++ b/helm/bundles/cortex-manila/templates/pipelines.yaml
@@ -11,15 +11,15 @@ spec:
for additional filtering and weighing via this external scheduler pipeline.
Cortex returns a ranked list of hosts back to manila for final selection.
type: filter-weigher
- steps:
- - type: weigher
- name: netapp_cpu_usage_balancing
+ filters: []
+ weighers:
+ - name: netapp_cpu_usage_balancing
description: |
This step uses netapp storage pool cpu metrics condensed into a feature
to balance manila share placements across available storage pools.
Its main purpose is to avoid cpu overutilization on a storage pool which
may lead to performance degradation for shares placed on that pool.
- opts:
+ params:
# Min-max scaling for gap-fitting based on CPU usage (pct)
avgCPUUsageLowerBound: 0 # pct
avgCPUUsageUpperBound: 10 # pct
@@ -29,4 +29,3 @@ spec:
maxCPUUsageUpperBound: 10 # pct
maxCPUUsageActivationLowerBound: 0.0
maxCPUUsageActivationUpperBound: -0.25
- mandatory: false
diff --git a/helm/bundles/cortex-nova/templates/pipelines.yaml b/helm/bundles/cortex-nova/templates/pipelines.yaml
index c241f548e..cd90f6031 100644
--- a/helm/bundles/cortex-nova/templates/pipelines.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines.yaml
@@ -14,47 +14,35 @@ spec:
This is the pipeline used for VMware.
type: filter-weigher
createDecisions: false
- steps:
- - type: weigher
- name: vmware_hana_binpacking
+ filters: []
+ weighers:
+ - name: vmware_hana_binpacking
description: |
This step pulls HANA VMs onto the smallest possible gaps on HANA-exclusive
VMware hosts. In this way hosts with much free space are held free for
larger HANA VMs, improving overall packing efficiency for HANA workloads.
- knowledges:
- - name: host-utilization
- - name: host-capabilities
- opts:
+ params:
ramUtilizedAfterLowerBoundPct: 0
ramUtilizedAfterUpperBoundPct: 100
ramUtilizedAfterActivationLowerBound: 0.0
ramUtilizedAfterActivationUpperBound: 1.0
- mandatory: false
- - type: weigher
- name: vmware_general_purpose_balancing
+ - name: vmware_general_purpose_balancing
description: |
This step balances non-HANA VMs across non-HANA exclusive VMware hosts. It
pulls vms onto the freeest hosts possible to ensure an even distribution of
workloads across the available infrastructure.
- knowledges:
- - name: host-utilization
- - name: host-capabilities
- opts:
+ params:
ramUtilizedLowerBoundPct: 0
ramUtilizedUpperBoundPct: 100
ramUtilizedActivationLowerBound: 1.0
ramUtilizedActivationUpperBound: 0.0
- mandatory: false
- - type: weigher
- name: vmware_avoid_long_term_contended_hosts
+ - name: vmware_avoid_long_term_contended_hosts
description: |
This step avoids placing vms on vmware hosts with a high CPU contention over
a longer period of time, based on vrops contention metrics. In particular,
this step looks at a longer time window of 4 weeks to identify hosts that
are consistently contended.
- knowledges:
- - name: vmware-long-term-contended-hosts
- opts:
+ params:
avgCPUContentionLowerBound: 0 # pct
avgCPUContentionUpperBound: 10 # pct
avgCPUContentionActivationLowerBound: 0.0
@@ -63,17 +51,13 @@ spec:
maxCPUContentionUpperBound: 10 # pct
maxCPUContentionActivationLowerBound: 0.0
maxCPUContentionActivationUpperBound: -0.25
- mandatory: false
- - type: weigher
- name: vmware_avoid_short_term_contended_hosts
+ - name: vmware_avoid_short_term_contended_hosts
description: |
This step avoids placing vms on vmware hosts with a high CPU contention over
a shorter period of time, based on vrops contention metrics. In particular,
this step looks at a shorter time window of 20 minutes to identify hosts that
are currently contended.
- knowledges:
- - name: vmware-short-term-contended-hosts
- opts:
+ params:
avgCPUContentionLowerBound: 0 # pct
avgCPUContentionUpperBound: 10 # pct
avgCPUContentionActivationLowerBound: 0.0
@@ -82,4 +66,3 @@ spec:
maxCPUContentionUpperBound: 10 # pct
maxCPUContentionActivationLowerBound: 0.0
maxCPUContentionActivationUpperBound: -0.25
- mandatory: false
diff --git a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
index 0ba7926da..8f1af303c 100644
--- a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
@@ -17,7 +17,8 @@ spec:
{{- if $createDecisions }}
createDecisions: true
{{- end }}
- steps: []
+ filters: []
+ weighers: []
---
apiVersion: cortex.cloud/v1alpha1
kind: Pipeline
@@ -35,78 +36,66 @@ spec:
{{- if $createDecisions }}
createDecisions: true
{{- end }}
- steps:
- - type: filter
- impl: filter_host_instructions
+ filters:
+ - name: filter_host_instructions
description: |
This step will consider the `ignore_hosts` and `force_hosts` instructions
from the nova scheduler request spec to filter out or exclusively allow
certain hosts.
- knowledges: []
- - type: filter
- impl: filter_has_enough_capacity
+ - name: filter_has_enough_capacity
description: |
This step will filter out hosts that do not have enough available capacity
to host the requested flavor. If enabled, this step will subtract the
current reservations residing on this host from the available capacity.
- opts:
+ params:
# If reserved space should be locked even for matching requests.
# For the reservations pipeline, we don't want to unlock
# reserved space, to avoid reservations for the same project
# and flavor to overlap.
lockReserved: true
- - type: filter
- impl: filter_has_requested_traits
+ - name: filter_has_requested_traits
description: |
This step filters hosts that do not have the requested traits given by the
nova flavor extra spec: "trait:": "forbidden" means the host must
not have the specified trait. "trait:": "required" means the host
must have the specified trait.
- - type: filter
- impl: filter_has_accelerators
+ - name: filter_has_accelerators
description: |
This step will filter out hosts without the trait `COMPUTE_ACCELERATORS` if
the nova flavor extra specs request accelerators via "accel:device_profile".
- - type: filter
- impl: filter_correct_az
+ - name: filter_correct_az
description: |
This step will filter out hosts whose aggregate information indicates they
are not placed in the requested availability zone.
- - type: filter
- impl: filter_status_conditions
+ - name: filter_status_conditions
description: |
This step will filter out hosts for which the hypervisor status conditions
do not meet the expected values, for example, that the hypervisor is ready
and not disabled.
- - type: filter
- impl: filter_maintenance
+ - name: filter_maintenance
description: |
This step will filter out hosts that are currently in maintenance mode that
prevents scheduling, for example, manual maintenance or termination.
- - type: filter
- impl: filter_external_customer
+ - name: filter_external_customer
description: |
This step prefix-matches the domain name for external customer domains and
filters out hosts that are not intended for external customers. It considers
the `CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED` trait on hosts as well as the
`domain_name` scheduler hint from the nova request spec.
- opts:
+ params:
domainNamePrefixes: ["iaas-"]
- - type: filter
- impl: filter_packed_virtqueue
+ - name: filter_packed_virtqueue
description: |
If the flavor extra specs contain the `hw:virtio_packed_ring` key, or the
image properties contain the `hw_virtio_packed_ring` key, this step will
filter out hosts that do not have the `COMPUTE_NET_VIRTIO_PACKED` trait.
- - type: filter
- impl: filter_allowed_projects
+ - name: filter_allowed_projects
description: |
This step filters hosts based on allowed projects defined in the
hypervisor resource. Note that hosts allowing all projects are still
accessible and will not be filtered out. In this way some hypervisors
are made accessible to some projects only.
- - type: filter
- impl: filter_capabilities
+ - name: filter_capabilities
description: |
This step will filter out hosts that do not meet the compute capabilities
requested by the nova flavor extra specs, like `{"arch": "x86_64",
@@ -115,29 +104,26 @@ spec:
Note: currently, advanced boolean/numeric operators for the capabilities
like `>`, `!`, ... are not supported because they are not used by any of our
flavors in production.
- - type: filter
- impl: filter_instance_group_affinity
+ - name: filter_instance_group_affinity
description: |
This step selects hosts in the instance group specified in the nova
scheduler request spec.
- - type: filter
- impl: filter_instance_group_anti_affinity
+ - name: filter_instance_group_anti_affinity
description: |
This step selects hosts not in the instance group specified in the nova
scheduler request spec, but only until the max_server_per_host limit is
reached (default = 1).
- - type: filter
- impl: filter_live_migratable
+ - name: filter_live_migratable
description: |
This step ensures that the target host of a live migration can accept
the migrating VM, by checking cpu architecture, cpu features, emulated
devices, and cpu modes.
- - type: filter
- impl: filter_requested_destination
+ - name: filter_requested_destination
description: |
This step filters hosts based on the `requested_destination` instruction
from the nova scheduler request spec. It supports filtering by host and
by aggregates.
+ weighers: []
---
apiVersion: cortex.cloud/v1alpha1
kind: Pipeline
@@ -153,15 +139,11 @@ spec:
{{- if $createDecisions }}
createDecisions: true
{{- end }}
- steps:
- - type: descheduler
- impl: avoid_high_steal_pct
+ detectors:
+ - name: avoid_high_steal_pct
description: |
This step will deschedule VMs once they reach this CPU steal percentage over
the observed time span.
- knowledges:
- - name: kvm-libvirt-domain-cpu-steal-pct
- opts:
+ params:
maxStealPctOverObservedTimeSpan: 20.0
- mandatory: false
{{- end }}
\ No newline at end of file
diff --git a/helm/bundles/cortex-pods/templates/pipelines.yaml b/helm/bundles/cortex-pods/templates/pipelines.yaml
index 055e2190d..0dd3babd0 100644
--- a/helm/bundles/cortex-pods/templates/pipelines.yaml
+++ b/helm/bundles/cortex-pods/templates/pipelines.yaml
@@ -9,25 +9,17 @@ spec:
This pipeline is used to schedule pods onto nodes.
type: filter-weigher
createDecisions: true
- steps:
- - type: filter
- name: noop
+ filters:
+ - name: noop
description: |
This is only a passthrough step which lets all pod candidates through.
It is used as a placeholder step in the pods scheduler pipeline.
- knowledges: []
- mandatory: false
- - type: filter
- name: taint
+ - name: taint
description: |
Filters nodes based on taints, excluding nodes with NoSchedule taints
unless the pod has matching tolerations.
- knowledges: []
- mandatory: true
- - type: filter
- name: nodeaffinity
+ - name: nodeaffinity
description: |
Filters nodes based on pod's node affinity requirements, matching
nodes that satisfy the specified label selectors.
- knowledges: []
- mandatory: true
+ weighers: []
From a3ef2ef12d8a5eabcd36ebd73d7ffccf7a878036 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:26:38 +0100
Subject: [PATCH 21/41] Remove unnecessary createDecisions helm value
---
helm/bundles/cortex-nova/templates/pipelines.yaml | 1 -
helm/bundles/cortex-nova/templates/pipelines_kvm.yaml | 7 -------
helm/bundles/cortex-nova/values.yaml | 4 ----
3 files changed, 12 deletions(-)
diff --git a/helm/bundles/cortex-nova/templates/pipelines.yaml b/helm/bundles/cortex-nova/templates/pipelines.yaml
index cd90f6031..4c6148019 100644
--- a/helm/bundles/cortex-nova/templates/pipelines.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines.yaml
@@ -1,4 +1,3 @@
-{{- $createDecisions := .Values.pipelines.createDecisions | default false }}
---
apiVersion: cortex.cloud/v1alpha1
kind: Pipeline
diff --git a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
index 8f1af303c..3d9f2b694 100644
--- a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
@@ -1,4 +1,3 @@
-{{- $createDecisions := .Values.pipelines.createDecisions | default false }}
{{- if .Values.kvm.enabled }}
---
apiVersion: cortex.cloud/v1alpha1
@@ -14,9 +13,7 @@ spec:
Cortex returns a ranked list of hosts back to nova for final selection.
This is the pipeline used for KVM hypervisors (qemu and cloud-hypervisor).
type: filter-weigher
- {{- if $createDecisions }}
createDecisions: true
- {{- end }}
filters: []
weighers: []
---
@@ -33,9 +30,7 @@ spec:
cortex's weighing steps to provide an optimized host selection for the reservation.
This is the pipeline used for KVM hypervisors (qemu and cloud-hypervisor).
type: filter-weigher
- {{- if $createDecisions }}
createDecisions: true
- {{- end }}
filters:
- name: filter_host_instructions
description: |
@@ -136,9 +131,7 @@ spec:
compute hosts in order to optimize resource usage and performance.
This is the pipeline used for KVM hypervisors (qemu and cloud-hypervisor).
type: descheduler
- {{- if $createDecisions }}
createDecisions: true
- {{- end }}
detectors:
- name: avoid_high_steal_pct
description: |
diff --git a/helm/bundles/cortex-nova/values.yaml b/helm/bundles/cortex-nova/values.yaml
index a2aece78a..fcbce1cfd 100644
--- a/helm/bundles/cortex-nova/values.yaml
+++ b/helm/bundles/cortex-nova/values.yaml
@@ -62,10 +62,6 @@ openstack:
enabled: false
<<: *sharedSSOCert
-pipelines:
- # Use this flag to disable the creation of decisions across all pipelines.
- createDecisions: false
-
kvm:
# Use this flag to enable/disable KVM host related features.
enabled: false
From a507a0385fa650f9be864e30443618f50e77f076 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:43:25 +0100
Subject: [PATCH 22/41] Cleanup: enable kvm features in local dev setup [skip
ci]
---
cortex.secrets.example.yaml | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/cortex.secrets.example.yaml b/cortex.secrets.example.yaml
index 0870f7208..daab61cc9 100644
--- a/cortex.secrets.example.yaml
+++ b/cortex.secrets.example.yaml
@@ -20,6 +20,10 @@ sharedSSOCert: &sharedSSOCert
# If true, the certificate is not verified.
selfSigned: "false"
+# Enable kvm pipelines and scheduling support.
+kvm:
+ enabled: true
+
prometheus:
url: "https://path-to-your-prometheus"
sso:
From 04c9d59402da72bbac7d7ffc6631ef69a7a284c6 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:43:52 +0100
Subject: [PATCH 23/41] Set all steps ready condition when nothing fails
---
internal/scheduling/lib/base_pipeline_controller.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/internal/scheduling/lib/base_pipeline_controller.go b/internal/scheduling/lib/base_pipeline_controller.go
index de45fd351..972a0564b 100644
--- a/internal/scheduling/lib/base_pipeline_controller.go
+++ b/internal/scheduling/lib/base_pipeline_controller.go
@@ -107,6 +107,13 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
Reason: "SomeStepsNotReady",
Message: initResult.NonCriticalErr.Error(),
})
+ } else {
+ meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
+ Type: v1alpha1.PipelineConditionAllStepsReady,
+ Status: metav1.ConditionTrue,
+ Reason: "AllStepsReady",
+ Message: "all pipeline steps are ready",
+ })
}
c.Pipelines[obj.Name] = initResult.Pipeline
From 8bb5479d2d14d4592fff914e48937dbd1fd7e571 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:44:17 +0100
Subject: [PATCH 24/41] Fix: reconcile knowledge when rawlength is 0
---
internal/knowledge/extractor/controller.go | 2 +-
internal/knowledge/extractor/controller_test.go | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/internal/knowledge/extractor/controller.go b/internal/knowledge/extractor/controller.go
index 527203ede..69c276080 100644
--- a/internal/knowledge/extractor/controller.go
+++ b/internal/knowledge/extractor/controller.go
@@ -47,7 +47,7 @@ func (r *KnowledgeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// Sanity checks.
lastExtracted := knowledge.Status.LastExtracted.Time
recency := knowledge.Spec.Recency.Duration
- if lastExtracted.Add(recency).After(time.Now()) {
+ if lastExtracted.Add(recency).After(time.Now()) && knowledge.Status.RawLength != 0 {
log.Info("skipping knowledge extraction, not yet time", "name", knowledge.Name)
return ctrl.Result{RequeueAfter: time.Until(lastExtracted.Add(recency))}, nil
}
diff --git a/internal/knowledge/extractor/controller_test.go b/internal/knowledge/extractor/controller_test.go
index d58cc0702..09bf170d2 100644
--- a/internal/knowledge/extractor/controller_test.go
+++ b/internal/knowledge/extractor/controller_test.go
@@ -84,6 +84,7 @@ func TestKnowledgeReconciler_Reconcile_SkipRecentExtraction(t *testing.T) {
},
Status: v1alpha1.KnowledgeStatus{
LastExtracted: metav1.NewTime(recentTime),
+ RawLength: 100, // Indicate that there is existing data
},
}
From 8c13d812522f0919840c8d803598a53477424ae1 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 13:48:06 +0100
Subject: [PATCH 25/41] Fix: reconcile datasource when object count is 0
---
internal/knowledge/datasources/openstack/controller.go | 2 +-
internal/knowledge/datasources/prometheus/controller.go | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/internal/knowledge/datasources/openstack/controller.go b/internal/knowledge/datasources/openstack/controller.go
index 7daec347c..95227aa1b 100644
--- a/internal/knowledge/datasources/openstack/controller.go
+++ b/internal/knowledge/datasources/openstack/controller.go
@@ -60,7 +60,7 @@ func (r *OpenStackDatasourceReconciler) Reconcile(ctx context.Context, req ctrl.
log.Info("skipping datasource, not an openstack datasource", "name", datasource.Name)
return ctrl.Result{}, nil
}
- if datasource.Status.NextSyncTime.After(time.Now()) {
+ if datasource.Status.NextSyncTime.After(time.Now()) && datasource.Status.NumberOfObjects != 0 {
log.Info("skipping datasource sync, not yet time", "name", datasource.Name)
return ctrl.Result{RequeueAfter: time.Until(datasource.Status.NextSyncTime.Time)}, nil
}
diff --git a/internal/knowledge/datasources/prometheus/controller.go b/internal/knowledge/datasources/prometheus/controller.go
index 089283d80..23bf6725c 100644
--- a/internal/knowledge/datasources/prometheus/controller.go
+++ b/internal/knowledge/datasources/prometheus/controller.go
@@ -52,7 +52,7 @@ func (r *PrometheusDatasourceReconciler) Reconcile(ctx context.Context, req ctrl
log.Info("skipping datasource, not a prometheus datasource", "name", datasource.Name)
return ctrl.Result{}, nil
}
- if datasource.Status.NextSyncTime.After(time.Now()) {
+ if datasource.Status.NextSyncTime.After(time.Now()) && datasource.Status.NumberOfObjects != 0 {
log.Info("skipping datasource sync, not yet time", "name", datasource.Name)
return ctrl.Result{RequeueAfter: time.Until(datasource.Status.NextSyncTime.Time)}, nil
}
From 3e4647a1d43f6bc75cb49dbe2a9d1cb7547a74c6 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 26 Jan 2026 17:28:50 +0100
Subject: [PATCH 26/41] Split into weigher, filter, detector impl WIP
---
api/v1alpha1/pipeline_types.go | 38 ++++++++++--
api/v1alpha1/zz_generated.deepcopy.go | 46 +++++++++++---
config/crd/bases/cortex.cloud_pipelines.yaml | 12 ----
config/crd/cortex.cloud_pipelines.yaml | 12 ----
.../templates/crd/cortex.cloud_pipelines.yaml | 12 ----
.../cinder/pipeline_controller_test.go | 34 +++++------
.../decisions/cinder/supported_steps.go | 4 +-
.../scheduling/decisions/machines/noop.go | 2 +-
.../machines/pipeline_controller_test.go | 24 ++++----
.../decisions/machines/supported_steps.go | 4 +-
.../manila/pipeline_controller_test.go | 32 +++++-----
.../weighers/netapp_cpu_usage_balancing.go | 6 +-
.../decisions/manila/supported_steps.go | 4 +-
.../nova/pipeline_controller_test.go | 60 +++++++++----------
.../filters/filter_allowed_projects.go | 2 +-
.../plugins/filters/filter_capabilities.go | 2 +-
.../nova/plugins/filters/filter_correct_az.go | 2 +-
.../filters/filter_external_customer.go | 2 +-
.../filters/filter_has_accelerators.go | 2 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filters/filter_has_requested_traits.go | 2 +-
.../filters/filter_host_instructions.go | 2 +-
.../filters/filter_instance_group_affinity.go | 2 +-
.../filter_instance_group_anti_affinity.go | 2 +-
.../plugins/filters/filter_live_migratable.go | 2 +-
.../filters/filter_live_migratable_test.go | 18 ++++--
.../plugins/filters/filter_maintenance.go | 2 +-
.../filters/filter_packed_virtqueue.go | 2 +-
.../filters/filter_requested_destination.go | 2 +-
.../filter_requested_destination_test.go | 12 ++--
.../filters/filter_status_conditions.go | 2 +-
.../vmware_anti_affinity_noisy_projects.go | 6 +-
.../vmware_avoid_long_term_contended_hosts.go | 6 +-
...vmware_avoid_short_term_contended_hosts.go | 6 +-
.../vmware_general_purpose_balancing.go | 6 +-
.../weighers/vmware_hana_binpacking.go | 6 +-
.../decisions/nova/supported_steps.go | 4 +-
.../pods/pipeline_controller_test.go | 24 ++++----
.../plugins/filters/filter_node_affinity.go | 2 +-
.../plugins/filters/filter_node_available.go | 2 +-
.../plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_noop.go | 2 +-
.../pods/plugins/filters/filter_taint.go | 2 +-
.../pods/plugins/weighers/binpack.go | 2 +-
.../decisions/pods/supported_steps.go | 4 +-
internal/scheduling/lib/base_filter.go | 22 +++++++
.../lib/base_pipeline_controller_test.go | 48 +++++++--------
internal/scheduling/lib/base_step.go | 34 +----------
internal/scheduling/lib/base_weigher.go | 50 ++++++++++++++++
internal/scheduling/lib/detector.go | 19 ++++++
internal/scheduling/lib/detector_test.go | 24 ++++++++
internal/scheduling/lib/filter.go | 19 ++++++
internal/scheduling/lib/filter_monitor.go | 43 +++++++++++++
.../lib/{step_test.go => filter_test.go} | 8 +--
.../scheduling/lib/filter_weigher_pipeline.go | 20 +++----
.../lib/filter_weigher_pipeline_test.go | 60 +++----------------
internal/scheduling/lib/step.go | 13 +---
internal/scheduling/lib/step_monitor.go | 37 ++++--------
internal/scheduling/lib/step_monitor_test.go | 18 +++---
internal/scheduling/lib/weigher.go | 19 ++++++
internal/scheduling/lib/weigher_monitor.go | 43 +++++++++++++
internal/scheduling/lib/weigher_test.go | 24 ++++++++
internal/scheduling/lib/weigher_validation.go | 6 +-
.../scheduling/lib/weigher_validation_test.go | 4 +-
64 files changed, 570 insertions(+), 365 deletions(-)
create mode 100644 internal/scheduling/lib/base_filter.go
create mode 100644 internal/scheduling/lib/base_weigher.go
create mode 100644 internal/scheduling/lib/detector.go
create mode 100644 internal/scheduling/lib/detector_test.go
create mode 100644 internal/scheduling/lib/filter.go
create mode 100644 internal/scheduling/lib/filter_monitor.go
rename internal/scheduling/lib/{step_test.go => filter_test.go} (58%)
create mode 100644 internal/scheduling/lib/weigher.go
create mode 100644 internal/scheduling/lib/weigher_monitor.go
create mode 100644 internal/scheduling/lib/weigher_test.go
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index 67835c4ed..ef14c9072 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -8,7 +8,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
-type StepSpec struct {
+type FilterSpec struct {
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the step that can be used
+ // +kubebuilder:validation:Optional
+ Params runtime.RawExtension `json:"params,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+}
+
+type WeigherSpec struct {
// The name of the scheduler step in the cortex implementation.
// Must match to a step implemented by the pipeline controller.
Name string `json:"name"`
@@ -29,6 +44,21 @@ type StepSpec struct {
Multiplier *float64 `json:"multiplier,omitempty"`
}
+type DetectorSpec struct {
+ // The name of the scheduler step in the cortex implementation.
+ // Must match to a step implemented by the pipeline controller.
+ Name string `json:"name"`
+
+ // Additional configuration for the step that can be used
+ // +kubebuilder:validation:Optional
+ Params runtime.RawExtension `json:"params,omitempty"`
+
+ // Additional description of the step which helps understand its purpose
+ // and decisions made by it.
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+}
+
type PipelineType string
const (
@@ -71,14 +101,14 @@ type PipelineSpec struct {
// Filters remove host candidates from an initial set, leaving
// valid candidates. Filters are run before weighers are applied.
// +kubebuilder:validation:Optional
- Filters []StepSpec `json:"filters,omitempty"`
+ Filters []FilterSpec `json:"filters,omitempty"`
// Ordered list of weighers to apply in a scheduling pipeline.
//
// This attribute is set only if the pipeline type is filter-weigher.
// These weighers are run after filters are applied.
// +kubebuilder:validation:Optional
- Weighers []StepSpec `json:"weighers,omitempty"`
+ Weighers []WeigherSpec `json:"weighers,omitempty"`
// Ordered list of detectors to apply in a descheduling pipeline.
//
@@ -86,7 +116,7 @@ type PipelineSpec struct {
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied.
// +kubebuilder:validation:Optional
- Detectors []StepSpec `json:"detectors,omitempty"`
+ Detectors []DetectorSpec `json:"detectors,omitempty"`
}
const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 7fba343ac..cdbae7dd4 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -425,6 +425,38 @@ func (in *DeschedulingStatus) DeepCopy() *DeschedulingStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) {
+ *out = *in
+ in.Params.DeepCopyInto(&out.Params)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec.
+func (in *DetectorSpec) DeepCopy() *DetectorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DetectorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
+ *out = *in
+ in.Params.DeepCopyInto(&out.Params)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
+func (in *FilterSpec) DeepCopy() *FilterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(FilterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityDatasource) DeepCopyInto(out *IdentityDatasource) {
*out = *in
@@ -842,21 +874,21 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
- *out = make([]StepSpec, len(*in))
+ *out = make([]FilterSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Weighers != nil {
in, out := &in.Weighers, &out.Weighers
- *out = make([]StepSpec, len(*in))
+ *out = make([]WeigherSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Detectors != nil {
in, out := &in.Detectors, &out.Detectors
- *out = make([]StepSpec, len(*in))
+ *out = make([]DetectorSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1098,7 +1130,7 @@ func (in *StepResult) DeepCopy() *StepResult {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepSpec) DeepCopyInto(out *StepSpec) {
+func (in *WeigherSpec) DeepCopyInto(out *WeigherSpec) {
*out = *in
in.Params.DeepCopyInto(&out.Params)
if in.Multiplier != nil {
@@ -1108,12 +1140,12 @@ func (in *StepSpec) DeepCopyInto(out *StepSpec) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
-func (in *StepSpec) DeepCopy() *StepSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherSpec.
+func (in *WeigherSpec) DeepCopy() *WeigherSpec {
if in == nil {
return nil
}
- out := new(StepSpec)
+ out := new(WeigherSpec)
in.DeepCopyInto(out)
return out
}
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 21aabe065..36402b022 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -79,12 +79,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -113,12 +107,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 21aabe065..36402b022 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -79,12 +79,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -113,12 +107,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index 6644dcd27..b835b13c0 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -85,12 +85,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
@@ -119,12 +113,6 @@ spec:
Additional description of the step which helps understand its purpose
and decisions made by it.
type: string
- multiplier:
- description: |-
- Optional multiplier to apply to the step's output.
- This can be used to increase or decrease the weight of a step
- relative to other steps in the same pipeline.
- type: number
name:
description: |-
The name of the scheduler step in the cortex implementation.
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index 7dd8c7081..b5b1daf66 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -84,8 +84,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -113,8 +113,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -175,8 +175,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
})
if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
@@ -284,8 +284,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -318,8 +318,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -372,8 +372,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -476,21 +476,21 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectNonCriticalError: false,
expectCriticalError: false,
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "test-plugin",
},
diff --git a/internal/scheduling/decisions/cinder/supported_steps.go b/internal/scheduling/decisions/cinder/supported_steps.go
index 9903bdb2f..90e5dc95d 100644
--- a/internal/scheduling/decisions/cinder/supported_steps.go
+++ b/internal/scheduling/decisions/cinder/supported_steps.go
@@ -8,12 +8,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type CinderWeigher = lib.Step[api.ExternalSchedulerRequest]
+type CinderWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the cinder scheduling.
var supportedWeighers = map[string]func() CinderWeigher{}
-type CinderFilter = lib.Step[api.ExternalSchedulerRequest]
+type CinderFilter = lib.Filter[api.ExternalSchedulerRequest]
// Configuration of filters supported by the cinder scheduling.
var supportedFilters = map[string]func() CinderFilter{}
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index 3b0104aa6..eeec4f848 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -15,7 +15,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, filter v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index 27aeb95ff..a12ade07d 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -211,21 +211,21 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectNonCriticalError: false,
expectCriticalError: false,
},
{
name: "noop step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{Name: "noop"},
},
expectNonCriticalError: false,
@@ -233,7 +233,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{Name: "unsupported"},
},
expectNonCriticalError: false,
@@ -318,8 +318,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -352,8 +352,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -399,8 +399,8 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/machines/supported_steps.go b/internal/scheduling/decisions/machines/supported_steps.go
index 730c92ded..4e04d64d1 100644
--- a/internal/scheduling/decisions/machines/supported_steps.go
+++ b/internal/scheduling/decisions/machines/supported_steps.go
@@ -8,12 +8,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type MachineWeigher = lib.Step[ironcore.MachinePipelineRequest]
+type MachineWeigher = lib.Weigher[ironcore.MachinePipelineRequest]
// Configuration of weighers supported by the machine scheduling.
var supportedWeighers = map[string]func() MachineWeigher{}
-type MachineFilter = lib.Step[ironcore.MachinePipelineRequest]
+type MachineFilter = lib.Filter[ironcore.MachinePipelineRequest]
// Configuration of filters supported by the machine scheduling.
var supportedFilters = map[string]func() MachineFilter{
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index 68c6b9c89..9b0f790e1 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -86,8 +86,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -115,8 +115,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -281,8 +281,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -315,8 +315,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -369,8 +369,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainManila,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -474,23 +474,23 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
knowledges []client.Object
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
knowledges: []client.Object{},
expectNonCriticalError: false,
expectCriticalError: false,
},
{
name: "supported netapp step",
- weighers: []v1alpha1.StepSpec{
+ weighers: []v1alpha1.WeigherSpec{
{
Name: "netapp_cpu_usage_balancing",
Params: runtime.RawExtension{
@@ -531,7 +531,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported-plugin",
},
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index d3a9b0bbb..11e9cca46 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -46,12 +46,12 @@ func (o NetappCPUUsageBalancingStepOpts) Validate() error {
// Step to balance CPU usage by avoiding highly used storage pools.
type NetappCPUUsageBalancingStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, NetappCPUUsageBalancingStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *NetappCPUUsageBalancingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *NetappCPUUsageBalancingStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "netapp-storage-pool-cpu-usage-manila"}); err != nil {
diff --git a/internal/scheduling/decisions/manila/supported_steps.go b/internal/scheduling/decisions/manila/supported_steps.go
index aee4af194..fca819711 100644
--- a/internal/scheduling/decisions/manila/supported_steps.go
+++ b/internal/scheduling/decisions/manila/supported_steps.go
@@ -9,12 +9,12 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type ManilaFilter = lib.Step[api.ExternalSchedulerRequest]
+type ManilaFilter = lib.Filter[api.ExternalSchedulerRequest]
// Configuration of filters supported by the manila scheduler.
var supportedFilters = map[string]func() ManilaFilter{}
-type ManilaWeigher = lib.Step[api.ExternalSchedulerRequest]
+type ManilaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the manila scheduler.
var supportedWeighers = map[string]func() ManilaWeigher{
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 59c099623..21d8b0a66 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -92,8 +92,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: false,
@@ -121,8 +121,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -173,8 +173,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
expectError: true,
@@ -264,21 +264,21 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectNonCriticalError: false,
expectCriticalError: false,
},
{
name: "supported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
},
@@ -288,7 +288,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported-plugin",
},
@@ -298,7 +298,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with scoping options",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
Params: runtime.RawExtension{
@@ -311,7 +311,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "step with invalid scoping options",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "filter_status_conditions",
Params: runtime.RawExtension{
@@ -427,8 +427,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -439,8 +439,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -475,8 +475,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -487,8 +487,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -547,8 +547,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
pipelineConf: &v1alpha1.Pipeline{
@@ -559,8 +559,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -597,8 +597,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
@@ -635,8 +635,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainNova,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
setupPipelineConfigs: true,
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index 3e08273a0..96815c618 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -14,7 +14,7 @@ import (
)
type FilterAllowedProjectsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Lock certain hosts for certain projects, based on the hypervisor spec.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index 31d10fd27..80dfa5b3c 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -15,7 +15,7 @@ import (
)
type FilterCapabilitiesStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Get the provided capabilities of a hypervisor resource in the format Nova expects.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index 5bfaab618..dfcdc9f4b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -13,7 +13,7 @@ import (
)
type FilterCorrectAZStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Only get hosts in the requested az.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index cc34afd07..b995be916 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -28,7 +28,7 @@ func (opts FilterExternalCustomerStepOpts) Validate() error {
}
type FilterExternalCustomerStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, FilterExternalCustomerStepOpts]
}
// Prefix-match the domain name for external customer domains and filter out hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 8320168a2..0a5b1339f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -14,7 +14,7 @@ import (
)
type FilterHasAcceleratorsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with accelerators.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index b80173473..4b07ef56c 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -23,7 +23,7 @@ type FilterHasEnoughCapacityOpts struct {
func (FilterHasEnoughCapacityOpts) Validate() error { return nil }
type FilterHasEnoughCapacity struct {
- lib.BaseStep[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, FilterHasEnoughCapacityOpts]
}
// Filter hosts that don't have enough capacity to run the requested flavor.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index ea4f81379..35367dff3 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -15,7 +15,7 @@ import (
)
type FilterHasRequestedTraits struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts that do not have the requested traits given by the extra spec:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index 66a9fccf3..cd57e2e4d 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -12,7 +12,7 @@ import (
)
type FilterHostInstructionsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Filter hosts based on instructions given in the request spec. Supported are:
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index 1b549aac4..fb42e7c19 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -12,7 +12,7 @@ import (
)
type FilterInstanceGroupAffinityStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts in spec.instance_group.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index 00243d6cb..e9390d9c3 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -14,7 +14,7 @@ import (
)
type FilterInstanceGroupAntiAffinityStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Select hosts not in spec_obj.instance_group but only until
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index 1076898b1..f31e72516 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -15,7 +15,7 @@ import (
)
type FilterLiveMigratableStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check if the encountered request spec is a live migration.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index c719a3eb6..b987e9c15 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -727,8 +727,10 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -812,8 +814,10 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -856,8 +860,10 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index 867317496..a8d386c4d 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -13,7 +13,7 @@ import (
)
type FilterMaintenanceStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index f0066218b..bb443ef57 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -14,7 +14,7 @@ import (
)
type FilterPackedVirtqueueStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If requested, only get hosts with packed virtqueues.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index 88285edd1..c9f0319fb 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -14,7 +14,7 @@ import (
)
type FilterRequestedDestinationStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// If `requested_destination` is set in the request spec, filter hosts
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index ca1faaa07..5952e4c3f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -494,8 +494,10 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
@@ -575,8 +577,10 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- Client: fakeClient,
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ Client: fakeClient,
+ },
},
}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index a0b96e0ed..870aaa58b 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -15,7 +15,7 @@ import (
)
type FilterStatusConditionsStep struct {
- lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
}
// Check that all status conditions meet the expected values, for example,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 0b1622376..43086ef97 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -37,12 +37,12 @@ func (o VMwareAntiAffinityNoisyProjectsStepOpts) Validate() error {
// Step to avoid noisy projects by downvoting the hosts they are running on.
type VMwareAntiAffinityNoisyProjectsStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAntiAffinityNoisyProjectsStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *VMwareAntiAffinityNoisyProjectsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *VMwareAntiAffinityNoisyProjectsStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-project-noisiness"}); err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index 2d7359928..eff4be46a 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -46,12 +46,12 @@ func (o VMwareAvoidLongTermContendedHostsStepOpts) Validate() error {
// Step to avoid long term contended hosts by downvoting them.
type VMwareAvoidLongTermContendedHostsStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidLongTermContendedHostsStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *VMwareAvoidLongTermContendedHostsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *VMwareAvoidLongTermContendedHostsStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-long-term-contended-hosts"}); err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index 3d841d25a..aca0fd2a7 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -46,12 +46,12 @@ func (o VMwareAvoidShortTermContendedHostsStepOpts) Validate() error {
// Step to avoid recently contended hosts by downvoting them.
type VMwareAvoidShortTermContendedHostsStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareAvoidShortTermContendedHostsStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *VMwareAvoidShortTermContendedHostsStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *VMwareAvoidShortTermContendedHostsStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "vmware-short-term-contended-hosts"}); err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index f4d39f1bc..1ed84a140 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -36,12 +36,12 @@ func (o VMwareGeneralPurposeBalancingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareGeneralPurposeBalancingStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareGeneralPurposeBalancingStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *VMwareGeneralPurposeBalancingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *VMwareGeneralPurposeBalancingStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx,
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index c5e017e66..62a91de43 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -36,12 +36,12 @@ func (o VMwareHanaBinpackingStepOpts) Validate() error {
// Step to balance VMs on hosts based on the host's available resources.
type VMwareHanaBinpackingStep struct {
// BaseStep is a helper struct that provides common functionality for all steps.
- lib.BaseStep[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
+ lib.BaseWeigher[api.ExternalSchedulerRequest, VMwareHanaBinpackingStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *VMwareHanaBinpackingStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- if err := s.BaseStep.Init(ctx, client, step); err != nil {
+func (s *VMwareHanaBinpackingStep) Init(ctx context.Context, client client.Client, weigher v1alpha1.WeigherSpec) error {
+ if err := s.BaseWeigher.Init(ctx, client, weigher); err != nil {
return err
}
if err := s.CheckKnowledges(ctx,
diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_steps.go
index 57957589d..4821e7ba5 100644
--- a/internal/scheduling/decisions/nova/supported_steps.go
+++ b/internal/scheduling/decisions/nova/supported_steps.go
@@ -10,7 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type NovaFilter = lib.Step[api.ExternalSchedulerRequest]
+type NovaFilter = lib.Filter[api.ExternalSchedulerRequest]
// Configuration of filters supported by the nova scheduler.
var supportedFilters = map[string]func() NovaFilter{
@@ -31,7 +31,7 @@ var supportedFilters = map[string]func() NovaFilter{
"filter_requested_destination": func() NovaFilter { return &filters.FilterRequestedDestinationStep{} },
}
-type NovaWeigher = lib.Step[api.ExternalSchedulerRequest]
+type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the nova scheduler.
var supportedWeighers = map[string]func() NovaWeigher{
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index cdc11b86b..7a0720bcc 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -186,21 +186,21 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- filters []v1alpha1.StepSpec
- weighers []v1alpha1.StepSpec
+ filters []v1alpha1.FilterSpec
+ weighers []v1alpha1.WeigherSpec
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "empty steps",
- filters: []v1alpha1.StepSpec{},
- weighers: []v1alpha1.StepSpec{},
+ filters: []v1alpha1.FilterSpec{},
+ weighers: []v1alpha1.WeigherSpec{},
expectNonCriticalError: false,
expectCriticalError: false,
},
{
name: "noop step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "noop",
},
@@ -210,7 +210,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- filters: []v1alpha1.StepSpec{
+ filters: []v1alpha1.FilterSpec{
{
Name: "unsupported",
},
@@ -298,8 +298,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
@@ -332,8 +332,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: false,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: false,
@@ -379,8 +379,8 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Type: v1alpha1.PipelineTypeFilterWeigher,
SchedulingDomain: v1alpha1.SchedulingDomainPods,
CreateDecisions: true,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
createDecisions: true,
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index 265bffa24..acacc6ea6 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -19,7 +19,7 @@ type NodeAffinityFilter struct {
Alias string
}
-func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index 45ae98067..c668e5f0a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -18,7 +18,7 @@ type NodeAvailableFilter struct {
Alias string
}
-func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 44d185580..70e897b6a 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -19,7 +19,7 @@ type NodeCapacityFilter struct {
Alias string
}
-func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 3cd328a50..08fbf1cd4 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -18,7 +18,7 @@ type NoopFilter struct {
Alias string
}
-func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 82135b161..697c41466 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -18,7 +18,7 @@ type TaintFilter struct {
Alias string
}
-func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index f5572ae96..65ca207ab 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -28,7 +28,7 @@ func (o BinpackingStepOpts) Validate() error {
}
type BinpackingStep struct {
- lib.BaseStep[api.PodPipelineRequest, BinpackingStepOpts]
+ lib.BaseWeigher[api.PodPipelineRequest, BinpackingStepOpts]
}
func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.StepResult, error) {
diff --git a/internal/scheduling/decisions/pods/supported_steps.go b/internal/scheduling/decisions/pods/supported_steps.go
index 57e2d8151..43c8f1ac2 100644
--- a/internal/scheduling/decisions/pods/supported_steps.go
+++ b/internal/scheduling/decisions/pods/supported_steps.go
@@ -10,7 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type PodFilter = lib.Step[pods.PodPipelineRequest]
+type PodFilter = lib.Filter[pods.PodPipelineRequest]
// Configuration of filters supported by the pods scheduler.
var supportedFilters = map[string]func() PodFilter{
@@ -20,7 +20,7 @@ var supportedFilters = map[string]func() PodFilter{
"nodecapacity": func() PodFilter { return &filters.NodeCapacityFilter{} },
}
-type PodWeigher = lib.Step[pods.PodPipelineRequest]
+type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
// Configuration of weighers supported by the pods scheduler.
var supportedWeighers = map[string]func() PodWeigher{
diff --git a/internal/scheduling/lib/base_filter.go b/internal/scheduling/lib/base_filter.go
new file mode 100644
index 000000000..e774eaa95
--- /dev/null
+++ b/internal/scheduling/lib/base_filter.go
@@ -0,0 +1,22 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Common base for all steps that provides some functionality
+// that would otherwise be duplicated across all steps.
+type BaseFilter[RequestType PipelineRequest, Opts StepOpts] struct {
+ BaseStep[RequestType, Opts]
+}
+
+// Init the filter with the database and options.
+func (s *BaseFilter[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+ return s.BaseStep.Init(ctx, client, step.Params)
+}
diff --git a/internal/scheduling/lib/base_pipeline_controller_test.go b/internal/scheduling/lib/base_pipeline_controller_test.go
index 52bc20414..ffe20d13f 100644
--- a/internal/scheduling/lib/base_pipeline_controller_test.go
+++ b/internal/scheduling/lib/base_pipeline_controller_test.go
@@ -49,8 +49,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -69,8 +69,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -80,8 +80,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -91,8 +91,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeDescheduler,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
{
@@ -102,8 +102,8 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
},
@@ -179,12 +179,12 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{
+ Filters: []v1alpha1.FilterSpec{
{
Name: "test-filter",
},
},
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -218,7 +218,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -236,7 +236,7 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
},
knowledges: []v1alpha1.Knowledge{},
@@ -314,8 +314,8 @@ func TestBasePipelineController_HandlePipelineCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -359,8 +359,8 @@ func TestBasePipelineController_HandlePipelineUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Filters: []v1alpha1.StepSpec{},
- Weighers: []v1alpha1.StepSpec{},
+ Filters: []v1alpha1.FilterSpec{},
+ Weighers: []v1alpha1.WeigherSpec{},
},
}
@@ -469,7 +469,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -483,7 +483,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -513,7 +513,7 @@ func TestBasePipelineController_handleKnowledgeChange(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -587,7 +587,7 @@ func TestBasePipelineController_HandleKnowledgeCreated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -735,7 +735,7 @@ func TestBasePipelineController_HandleKnowledgeUpdated(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
@@ -800,7 +800,7 @@ func TestBasePipelineController_HandleKnowledgeDeleted(t *testing.T) {
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
Type: v1alpha1.PipelineTypeFilterWeigher,
- Weighers: []v1alpha1.StepSpec{
+ Weighers: []v1alpha1.WeigherSpec{
{
Name: "test-weigher",
},
diff --git a/internal/scheduling/lib/base_step.go b/internal/scheduling/lib/base_step.go
index c58a5b8bc..0fd20a42b 100644
--- a/internal/scheduling/lib/base_step.go
+++ b/internal/scheduling/lib/base_step.go
@@ -5,13 +5,9 @@ package lib
import (
"context"
- "errors"
- "fmt"
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/pkg/conf"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -27,8 +23,8 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
}
// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- opts := conf.NewRawOptsBytes(step.Params.Raw)
+func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, params runtime.RawExtension) error {
+ opts := conf.NewRawOptsBytes(params.Raw)
if err := s.Load(opts); err != nil {
return err
}
@@ -40,30 +36,6 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl
return nil
}
-// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
- if d.Client == nil {
- return errors.New("kubernetes client not initialized")
- }
- for _, objRef := range kns {
- knowledge := &v1alpha1.Knowledge{}
- if err := d.Client.Get(ctx, client.ObjectKey{
- Name: objRef.Name,
- Namespace: objRef.Namespace,
- }, knowledge); err != nil {
- return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return fmt.Errorf("knowledge %s not ready", objRef.Name)
- }
- if knowledge.Status.RawLength == 0 {
- return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
- }
- }
- return nil
-}
-
// Get a default result (no action) for the input weight keys given in the request.
// Use this to initialize the result before applying filtering/weighing logic.
func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
diff --git a/internal/scheduling/lib/base_weigher.go b/internal/scheduling/lib/base_weigher.go
new file mode 100644
index 000000000..ac1fac034
--- /dev/null
+++ b/internal/scheduling/lib/base_weigher.go
@@ -0,0 +1,50 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Common base for all weighers that provides some functionality
+// that would otherwise be duplicated across all weighers.
+type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] struct {
+ BaseStep[RequestType, Opts]
+}
+
+// Init the weigher with the database and options.
+func (s *BaseWeigher[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+ return s.BaseStep.Init(ctx, client, step.Params)
+}
+
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+ if d.Client == nil {
+ return errors.New("kubernetes client not initialized")
+ }
+ for _, objRef := range kns {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := d.Client.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return fmt.Errorf("knowledge %s not ready", objRef.Name)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
+ }
+ }
+ return nil
+}
diff --git a/internal/scheduling/lib/detector.go b/internal/scheduling/lib/detector.go
new file mode 100644
index 000000000..371cf02f9
--- /dev/null
+++ b/internal/scheduling/lib/detector.go
@@ -0,0 +1,19 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Interface for a detector as part of the (de)scheduling pipeline.
+type Detector[RequestType PipelineRequest] interface {
+ Step[RequestType]
+
+ // Configure the detector and initialize things like a database connection.
+ Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
+}
diff --git a/internal/scheduling/lib/detector_test.go b/internal/scheduling/lib/detector_test.go
new file mode 100644
index 000000000..b6cc118b0
--- /dev/null
+++ b/internal/scheduling/lib/detector_test.go
@@ -0,0 +1,24 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type mockDetector[RequestType PipelineRequest] struct {
+ InitFunc func(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
+ RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
+}
+
+func (m *mockDetector[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+ return m.InitFunc(ctx, client, step)
+}
+func (m *mockDetector[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ return m.RunFunc(traceLog, request)
+}
diff --git a/internal/scheduling/lib/filter.go b/internal/scheduling/lib/filter.go
new file mode 100644
index 000000000..7e66d2361
--- /dev/null
+++ b/internal/scheduling/lib/filter.go
@@ -0,0 +1,19 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Interface for a filter as part of the scheduling pipeline.
+type Filter[RequestType PipelineRequest] interface {
+ Step[RequestType]
+
+ // Configure the filter and initialize things like a database connection.
+ Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error
+}
diff --git a/internal/scheduling/lib/filter_monitor.go b/internal/scheduling/lib/filter_monitor.go
new file mode 100644
index 000000000..b09d4c09a
--- /dev/null
+++ b/internal/scheduling/lib/filter_monitor.go
@@ -0,0 +1,43 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Wraps a scheduler filter to monitor its execution.
+type FilterMonitor[RequestType PipelineRequest] struct {
+ // The filter to monitor.
+ filter Filter[RequestType]
+ // The monitor tracking the step's execution.
+ monitor *StepMonitor[RequestType]
+}
+
+// Wrap the given filter with a monitor.
+func monitorFilter[RequestType PipelineRequest](
+ filter Filter[RequestType],
+ stepName string,
+ m PipelineMonitor,
+) *FilterMonitor[RequestType] {
+
+ return &FilterMonitor[RequestType]{
+ filter: filter,
+ monitor: monitorStep[RequestType](stepName, m),
+ }
+}
+
+// Initialize the wrapped filter.
+func (fm *FilterMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+ return fm.filter.Init(ctx, client, step)
+}
+
+// Run the filter and observe its execution.
+func (fm *FilterMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ return fm.monitor.RunWrapped(traceLog, request, fm.filter)
+}
diff --git a/internal/scheduling/lib/step_test.go b/internal/scheduling/lib/filter_test.go
similarity index 58%
rename from internal/scheduling/lib/step_test.go
rename to internal/scheduling/lib/filter_test.go
index 2e5e899e7..37b1d58e5 100644
--- a/internal/scheduling/lib/step_test.go
+++ b/internal/scheduling/lib/filter_test.go
@@ -11,14 +11,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockStep[RequestType PipelineRequest] struct {
- InitFunc func(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+type mockFilter[RequestType PipelineRequest] struct {
+ InitFunc func(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error
RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
}
-func (m *mockStep[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockFilter[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
return m.InitFunc(ctx, client, step)
}
-func (m *mockStep[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockFilter[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
index 07adbac31..42a399e70 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -25,11 +25,11 @@ type filterWeigherPipeline[RequestType PipelineRequest] struct {
// The order in which filters are applied, by their step name.
filtersOrder []string
// The filters by their name.
- filters map[string]Step[RequestType]
+ filters map[string]Filter[RequestType]
// The order in which weighers are applied, by their step name.
weighersOrder []string
// The weighers by their name.
- weighers map[string]Step[RequestType]
+ weighers map[string]Weigher[RequestType]
// Multipliers to apply to weigher outputs.
weighersMultipliers map[string]float64
// Monitor to observe the pipeline.
@@ -41,10 +41,10 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
ctx context.Context,
client client.Client,
name string,
- supportedFilters map[string]func() Step[RequestType],
- confedFilters []v1alpha1.StepSpec,
- supportedWeighers map[string]func() Step[RequestType],
- confedWeighers []v1alpha1.StepSpec,
+ supportedFilters map[string]func() Filter[RequestType],
+ confedFilters []v1alpha1.FilterSpec,
+ supportedWeighers map[string]func() Weigher[RequestType],
+ confedWeighers []v1alpha1.WeigherSpec,
monitor PipelineMonitor,
) PipelineInitResult[Pipeline[RequestType]] {
@@ -60,7 +60,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all filters from the configuration.
- filtersByName := make(map[string]Step[RequestType], len(confedFilters))
+ filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
filtersOrder := []string{}
for _, filterConfig := range confedFilters {
slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
@@ -72,7 +72,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
}
filter := makeFilter()
- filter = monitorStep(ctx, client, filterConfig, filter, pipelineMonitor)
+ filter = monitorFilter(filter, filterConfig.Name, pipelineMonitor)
if err := filter.Init(ctx, client, filterConfig); err != nil {
return PipelineInitResult[Pipeline[RequestType]]{
CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
@@ -84,7 +84,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
// Load all weighers from the configuration.
- weighersByName := make(map[string]Step[RequestType], len(confedWeighers))
+ weighersByName := make(map[string]Weigher[RequestType], len(confedWeighers))
weighersMultipliers := make(map[string]float64, len(confedWeighers))
weighersOrder := []string{}
var nonCriticalErr error
@@ -99,7 +99,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
weigher := makeWeigher()
// Validate that the weigher doesn't unexpectedly filter out hosts.
weigher = validateWeigher(weigher)
- weigher = monitorStep(ctx, client, weigherConfig, weigher, pipelineMonitor)
+ weigher = monitorWeigher(weigher, weigherConfig.Name, pipelineMonitor)
if err := weigher.Init(ctx, client, weigherConfig); err != nil {
nonCriticalErr = errors.New("failed to initialize weigher: " + err.Error())
continue // Weighers are optional.
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
index 3322c487f..8038bea9e 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -4,64 +4,20 @@
package lib
import (
- "context"
"log/slog"
"math"
"testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockFilter struct {
- err error
- name string
-}
-
-func (m *mockFilter) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- return nil
-}
-
-func (m *mockFilter) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- if m.err != nil {
- return nil, m.err
- }
- return &StepResult{
- Activations: map[string]float64{"host1": 0.0, "host2": 0.0},
- }, nil
-}
-
-type mockWeigher struct {
- err error
- name string
-}
-
-func (m *mockWeigher) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- return nil
-}
-
-func (m *mockWeigher) Run(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- if m.err != nil {
- return nil, m.err
- }
- return &StepResult{
- Activations: map[string]float64{"host1": 0.0, "host2": 1.0},
- }, nil
-}
-
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &filterWeigherPipeline[mockPipelineRequest]{
- filters: map[string]Step[mockPipelineRequest]{
- "mock_filter": &mockFilter{
- name: "mock_filter",
- },
+ filters: map[string]Filter[mockPipelineRequest]{
+ "mock_filter": &mockFilter[mockPipelineRequest]{},
},
filtersOrder: []string{"mock_filter"},
- weighers: map[string]Step[mockPipelineRequest]{
- "mock_weigher": &mockWeigher{
- name: "mock_weigher",
- },
+ weighers: map[string]Weigher[mockPipelineRequest]{
+ "mock_weigher": &mockWeigher[mockPipelineRequest]{},
},
weighersOrder: []string{"mock_weigher"},
}
@@ -136,7 +92,7 @@ func TestPipeline_NormalizeNovaWeights(t *testing.T) {
func TestPipeline_ApplyStepWeights(t *testing.T) {
p := &filterWeigherPipeline[mockPipelineRequest]{
- weighers: map[string]Step[mockPipelineRequest]{},
+ weighers: map[string]Weigher[mockPipelineRequest]{},
weighersOrder: []string{"step1", "step2"},
}
@@ -207,14 +163,12 @@ func TestPipeline_SortHostsByWeights(t *testing.T) {
}
func TestPipeline_RunFilters(t *testing.T) {
- mockStep := &mockFilter{
- name: "mock_filter",
- }
+ mockStep := &mockFilter[mockPipelineRequest]{}
p := &filterWeigherPipeline[mockPipelineRequest]{
filtersOrder: []string{
"mock_filter",
},
- filters: map[string]Step[mockPipelineRequest]{
+ filters: map[string]Filter[mockPipelineRequest]{
"mock_filter": mockStep,
},
}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index fa6c055ff..2980b1e84 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -4,12 +4,8 @@
package lib
import (
- "context"
"errors"
"log/slog"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
var (
@@ -17,12 +13,9 @@ var (
ErrStepSkipped = errors.New("step skipped")
)
-// Interface for a scheduler step.
+// Steps can be chained together to form a scheduling pipeline.
type Step[RequestType PipelineRequest] interface {
- // Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
-
- // Run this step of the scheduling pipeline.
+ // Run this step in the scheduling pipeline.
//
// The request is immutable and modifications are stored in the result.
// This allows steps to be run in parallel (e.g. weighers) without passing
@@ -32,7 +25,7 @@ type Step[RequestType PipelineRequest] interface {
// map of activations. I.e., filters implementing this interface should
// remove activations by omitting them from the returned map.
//
- // Weighers implementing this interface should adjust activation
+ // Weighers implementing this interface should adjust activation
// values in the returned map, including all hosts from the request.
//
// A traceLog is provided that contains the global request id and should
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/step_monitor.go
index 2e361c1b3..3459d5455 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/step_monitor.go
@@ -4,7 +4,6 @@
package lib
import (
- "context"
"fmt"
"log/slog"
"maps"
@@ -14,9 +13,7 @@ import (
"strconv"
"strings"
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
// Wraps a scheduler step to monitor its execution.
@@ -29,8 +26,6 @@ type StepMonitor[RequestType PipelineRequest] struct {
// The name of this step.
stepName string
- // The wrapped scheduler step to monitor.
- Step Step[RequestType]
// A timer to measure how long the step takes to run.
runTimer prometheus.Observer
// A metric to monitor how much the step modifies the weights of the subjects.
@@ -43,35 +38,22 @@ type StepMonitor[RequestType PipelineRequest] struct {
stepImpactObserver *prometheus.HistogramVec
}
-// Initialize the wrapped step with the database and options.
-func (s *StepMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
- return s.Step.Init(ctx, client, step)
-}
-
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest](
- _ context.Context,
- _ client.Client,
- step v1alpha1.StepSpec,
- impl Step[RequestType],
- m PipelineMonitor,
-) *StepMonitor[RequestType] {
-
+func monitorStep[RequestType PipelineRequest](stepName string, m PipelineMonitor) *StepMonitor[RequestType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, stepName)
}
var removedSubjectsObserver prometheus.Observer
if m.stepRemovedSubjectsObserver != nil {
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
- WithLabelValues(m.PipelineName, step.Name)
+ WithLabelValues(m.PipelineName, stepName)
}
return &StepMonitor[RequestType]{
- Step: impl,
- stepName: step.Name,
- pipelineName: m.PipelineName,
runTimer: runTimer,
+ stepName: stepName,
+ pipelineName: m.PipelineName,
stepSubjectWeight: m.stepSubjectWeight,
removedSubjectsObserver: removedSubjectsObserver,
stepReorderingsObserver: m.stepReorderingsObserver,
@@ -80,14 +62,19 @@ func monitorStep[RequestType PipelineRequest](
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *StepMonitor[RequestType]) RunWrapped(
+ traceLog *slog.Logger,
+ request RequestType,
+ step Step[RequestType],
+) (*StepResult, error) {
+
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
defer timer.ObserveDuration()
}
inWeights := request.GetWeights()
- stepResult, err := s.Step.Run(traceLog, request)
+ stepResult, err := step.Run(traceLog, request)
if err != nil {
return nil, err
}
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/step_monitor_test.go
index c248ec576..bb05621d2 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/step_monitor_test.go
@@ -22,23 +22,23 @@ func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
monitor := &StepMonitor[mockPipelineRequest]{
- stepName: "mock_step",
- Step: &mockStep[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
- Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
- }, nil
- },
- },
+ stepName: "mock_step",
runTimer: runTimer,
stepSubjectWeight: nil,
removedSubjectsObserver: removedSubjectsObserver,
}
+ step := &mockWeigher[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ return &StepResult{
+ Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
+ }, nil
+ },
+ }
request := mockPipelineRequest{
Subjects: []string{"subject1", "subject2", "subject3"},
Weights: map[string]float64{"subject1": 0.2, "subject2": 0.1, "subject3": 0.0},
}
- if _, err := monitor.Run(slog.Default(), request); err != nil {
+ if _, err := monitor.RunWrapped(slog.Default(), request, step); err != nil {
t.Fatalf("Run() error = %v, want nil", err)
}
if len(removedSubjectsObserver.Observations) != 1 {
diff --git a/internal/scheduling/lib/weigher.go b/internal/scheduling/lib/weigher.go
new file mode 100644
index 000000000..dc7aa48d9
--- /dev/null
+++ b/internal/scheduling/lib/weigher.go
@@ -0,0 +1,19 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Interface for a weigher as part of the scheduling pipeline.
+type Weigher[RequestType PipelineRequest] interface {
+ Step[RequestType]
+
+ // Configure the weigher and initialize things like a database connection.
+ Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error
+}
diff --git a/internal/scheduling/lib/weigher_monitor.go b/internal/scheduling/lib/weigher_monitor.go
new file mode 100644
index 000000000..9838c3325
--- /dev/null
+++ b/internal/scheduling/lib/weigher_monitor.go
@@ -0,0 +1,43 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Wraps a scheduler weigher to monitor its execution.
+type WeigherMonitor[RequestType PipelineRequest] struct {
+ // The weigher to monitor.
+ weigher Weigher[RequestType]
+ // The monitor tracking the step's execution.
+ monitor *StepMonitor[RequestType]
+}
+
+// Wrap the given weigher with a monitor.
+func monitorWeigher[RequestType PipelineRequest](
+ weigher Weigher[RequestType],
+ stepName string,
+ m PipelineMonitor,
+) *WeigherMonitor[RequestType] {
+
+ return &WeigherMonitor[RequestType]{
+ weigher: weigher,
+ monitor: monitorStep[RequestType](stepName, m),
+ }
+}
+
+// Initialize the wrapped weigher.
+func (wm *WeigherMonitor[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+ return wm.weigher.Init(ctx, client, step)
+}
+
+// Run the weigher and observe its execution.
+func (wm *WeigherMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ return wm.monitor.RunWrapped(traceLog, request, wm.weigher)
+}
diff --git a/internal/scheduling/lib/weigher_test.go b/internal/scheduling/lib/weigher_test.go
new file mode 100644
index 000000000..7cc74f3c9
--- /dev/null
+++ b/internal/scheduling/lib/weigher_test.go
@@ -0,0 +1,24 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type mockWeigher[RequestType PipelineRequest] struct {
+ InitFunc func(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error
+ RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
+}
+
+func (m *mockWeigher[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+ return m.InitFunc(ctx, client, step)
+}
+func (m *mockWeigher[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ return m.RunFunc(traceLog, request)
+}
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index 629ba7b6b..a86e19ec5 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -15,17 +15,17 @@ import (
// Wrapper for scheduler steps that validates them before/after execution.
type WeigherValidator[RequestType PipelineRequest] struct {
// The wrapped weigher to validate.
- Weigher Step[RequestType]
+ Weigher Weigher[RequestType]
}
// Initialize the wrapped weigher with the database and options.
-func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
slog.Info("scheduler: init validation for step", "name", step.Name)
return s.Weigher.Init(ctx, client, step)
}
// Validate the wrapped weigher with the database and options.
-func validateWeigher[RequestType PipelineRequest](weigher Step[RequestType]) *WeigherValidator[RequestType] {
+func validateWeigher[RequestType PipelineRequest](weigher Weigher[RequestType]) *WeigherValidator[RequestType] {
return &WeigherValidator[RequestType]{Weigher: weigher}
}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index aa6cba851..d990826a2 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -10,7 +10,7 @@ import (
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockWeigher[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
@@ -45,7 +45,7 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockStep[mockPipelineRequest]{
+ mockStep := &mockWeigher[mockPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
return &StepResult{
Activations: map[string]float64{
From a61b457ef6c9a9067f854e20feca8ca728b32538 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 07:53:46 +0100
Subject: [PATCH 27/41] Fix descheduler code
---
.../scheduling/descheduling/nova/monitor.go | 4 +-
.../descheduling/nova/monitor_test.go | 16 +++---
.../scheduling/descheduling/nova/pipeline.go | 2 +-
.../nova/pipeline_controller_test.go | 10 ++--
.../descheduling/nova/pipeline_test.go | 12 ++---
.../descheduling/nova/plugins/base.go | 2 +-
.../descheduling/nova/plugins/base_test.go | 2 +-
.../nova/plugins/kvm/avoid_high_steal_pct.go | 2 +-
internal/scheduling/descheduling/nova/step.go | 2 +-
internal/scheduling/lib/detector.go | 19 -------
internal/scheduling/lib/detector_test.go | 24 ---------
internal/scheduling/lib/filter_test.go | 6 +++
internal/scheduling/lib/filter_validation.go | 51 +++++++++++++++++++
.../scheduling/lib/filter_validation_test.go | 4 ++
.../scheduling/lib/filter_weigher_pipeline.go | 1 +
.../lib/filter_weigher_pipeline_test.go | 38 ++++++++++++--
internal/scheduling/lib/weigher_test.go | 6 +++
17 files changed, 129 insertions(+), 72 deletions(-)
delete mode 100644 internal/scheduling/lib/detector.go
delete mode 100644 internal/scheduling/lib/detector_test.go
create mode 100644 internal/scheduling/lib/filter_validation.go
create mode 100644 internal/scheduling/lib/filter_validation_test.go
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index ea7a48163..6fd248321 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -83,7 +83,7 @@ type StepMonitor struct {
}
// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor {
+func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -101,7 +101,7 @@ func monitorStep(step Step, conf v1alpha1.StepSpec, monitor Monitor) StepMonitor
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return m.step.Init(ctx, client, step)
}
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/descheduling/nova/monitor_test.go
index 1f8e658de..ed7416848 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/descheduling/nova/monitor_test.go
@@ -80,7 +80,7 @@ type mockMonitorStep struct {
runCalled bool
}
-func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
m.initCalled = true
return m.initError
}
@@ -97,7 +97,7 @@ func TestMonitorStep(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -117,7 +117,7 @@ func TestMonitorStep(t *testing.T) {
func TestStepMonitor_Init(t *testing.T) {
monitor := NewPipelineMonitor()
step := &mockMonitorStep{}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
@@ -139,7 +139,7 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
step := &mockMonitorStep{
initError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
client := fake.NewClientBuilder().Build()
@@ -159,7 +159,7 @@ func TestStepMonitor_Run(t *testing.T) {
step := &mockMonitorStep{
decisions: decisions,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -189,7 +189,7 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
step := &mockMonitorStep{
runError: expectedErr,
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -214,7 +214,7 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
step := &mockMonitorStep{
decisions: []plugins.Decision{}, // Empty slice
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -242,7 +242,7 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
{VMID: "vm1", Reason: "test"},
},
}
- conf := v1alpha1.StepSpec{Name: "test-step"}
+ conf := v1alpha1.DetectorSpec{Name: "test-step"}
monitoredStep := monitorStep(step, conf, monitor)
// Should not panic with nil timers/counters
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index a0a0226d2..aff8c95b3 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -33,7 +33,7 @@ type Pipeline struct {
func (p *Pipeline) Init(
ctx context.Context,
- confedSteps []v1alpha1.StepSpec,
+ confedSteps []v1alpha1.DetectorSpec,
supportedSteps map[string]Step,
) (nonCriticalErr, criticalErr error) {
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 3c24ce25a..fe37883d1 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -33,20 +33,20 @@ type mockControllerStep struct{}
func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
return nil, nil
}
-func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
return nil
}
func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
- steps []v1alpha1.StepSpec
+ steps []v1alpha1.DetectorSpec
expectNonCriticalError bool
expectCriticalError bool
}{
{
name: "successful pipeline initialization",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
Name: "mock-step",
},
@@ -56,7 +56,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "unsupported step",
- steps: []v1alpha1.StepSpec{
+ steps: []v1alpha1.DetectorSpec{
{
Name: "unsupported",
},
@@ -66,7 +66,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
},
{
name: "empty steps",
- steps: []v1alpha1.StepSpec{},
+ steps: []v1alpha1.DetectorSpec{},
expectNonCriticalError: false,
expectCriticalError: false,
},
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index 5a0c9a027..d9797fa77 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -30,7 +30,7 @@ func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
return m.decisions, nil
}
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
if m.initError != nil {
return m.initError
}
@@ -42,7 +42,7 @@ func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
supportedSteps map[string]Step
- confedSteps []v1alpha1.StepSpec
+ confedSteps []v1alpha1.DetectorSpec
expectedNonCriticalError bool
expectedCriticalError bool
}{
@@ -51,7 +51,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "test-step",
}},
expectedNonCriticalError: false,
@@ -62,7 +62,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"test-step": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "unsupported-step",
}},
expectedNonCriticalError: true,
@@ -73,7 +73,7 @@ func TestPipeline_Init(t *testing.T) {
supportedSteps: map[string]Step{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
- confedSteps: []v1alpha1.StepSpec{{
+ confedSteps: []v1alpha1.DetectorSpec{{
Name: "failing-step",
}},
expectedNonCriticalError: true,
@@ -85,7 +85,7 @@ func TestPipeline_Init(t *testing.T) {
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
- confedSteps: []v1alpha1.StepSpec{
+ confedSteps: []v1alpha1.DetectorSpec{
{
Name: "step1",
},
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go
index 055b5b644..12ac275f2 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/descheduling/nova/plugins/base.go
@@ -25,7 +25,7 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
d.Client = client
opts := conf.NewRawOptsBytes(step.Params.Raw)
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/descheduling/nova/plugins/base_test.go
index 36d988850..9b052bbca 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/base_test.go
@@ -23,7 +23,7 @@ func (o MockOptions) Validate() error {
func TestDetector_Init(t *testing.T) {
step := Detector[MockOptions]{}
cl := fake.NewClientBuilder().Build()
- err := step.Init(t.Context(), cl, v1alpha1.StepSpec{
+ err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
Params: runtime.RawExtension{Raw: []byte(`{
"option1": "value1",
"option2": 2
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
index e5717edb1..7e0a9d7e6 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
@@ -26,7 +26,7 @@ type AvoidHighStealPctStep struct {
}
// Initialize the step and validate that all required knowledges are ready.
-func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error {
+func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
if err := s.Detector.Init(ctx, client, step); err != nil {
return err
}
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/step.go
index 7c53bc991..552edf87b 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/step.go
@@ -21,5 +21,5 @@ type Step interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.StepSpec) error
+ Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
}
diff --git a/internal/scheduling/lib/detector.go b/internal/scheduling/lib/detector.go
deleted file mode 100644
index 371cf02f9..000000000
--- a/internal/scheduling/lib/detector.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Interface for a detector as part of the (de)scheduling pipeline.
-type Detector[RequestType PipelineRequest] interface {
- Step[RequestType]
-
- // Configure the step and initialize things like a database connection.
- Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
-}
diff --git a/internal/scheduling/lib/detector_test.go b/internal/scheduling/lib/detector_test.go
deleted file mode 100644
index b6cc118b0..000000000
--- a/internal/scheduling/lib/detector_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
- "log/slog"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-type mockDetector[RequestType PipelineRequest] struct {
- InitFunc func(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
- RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
-}
-
-func (m *mockDetector[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
- return m.InitFunc(ctx, client, step)
-}
-func (m *mockDetector[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
- return m.RunFunc(traceLog, request)
-}
diff --git a/internal/scheduling/lib/filter_test.go b/internal/scheduling/lib/filter_test.go
index 37b1d58e5..17098fcc4 100644
--- a/internal/scheduling/lib/filter_test.go
+++ b/internal/scheduling/lib/filter_test.go
@@ -17,8 +17,14 @@ type mockFilter[RequestType PipelineRequest] struct {
}
func (m *mockFilter[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+ if m.InitFunc == nil {
+ return nil
+ }
return m.InitFunc(ctx, client, step)
}
func (m *mockFilter[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ if m.RunFunc == nil {
+ return &StepResult{}, nil
+ }
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/filter_validation.go b/internal/scheduling/lib/filter_validation.go
new file mode 100644
index 000000000..1bbb794ae
--- /dev/null
+++ b/internal/scheduling/lib/filter_validation.go
@@ -0,0 +1,51 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Wrapper for scheduler filters that validates their results after execution.
+type FilterValidator[RequestType PipelineRequest] struct {
+ // The wrapped filter to validate.
+ Filter Filter[RequestType]
+}
+
+// Initialize the wrapped filter with the database and options.
+func (s *FilterValidator[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+ slog.Info("scheduler: init validation for step", "name", step.Name)
+ return s.Filter.Init(ctx, client, step)
+}
+
+// Wrap the given filter in a FilterValidator that checks its results.
+func validateFilter[RequestType PipelineRequest](filter Filter[RequestType]) *FilterValidator[RequestType] {
+ return &FilterValidator[RequestType]{Filter: filter}
+}
+
+// Run the wrapped filter and validate its result before returning it.
+func (s *FilterValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ result, err := s.Filter.Run(traceLog, request)
+ if err != nil {
+ return nil, err
+ }
+ // Note that for some schedulers the same subject (e.g. compute host) may
+ // appear multiple times if there is a substruct (e.g. hypervisor hostname).
+ // Since cortex will only schedule on the subject level and not below,
+ // we need to deduplicate the subjects first before the validation.
+ deduplicated := map[string]struct{}{}
+ for _, subject := range request.GetSubjects() {
+ deduplicated[subject] = struct{}{}
+ }
+ // Filters can only remove subjects, not add new ones.
+ if len(result.Activations) > len(deduplicated) {
+ return nil, errors.New("safety: number of subjects increased during step execution")
+ }
+ return result, nil
+}
diff --git a/internal/scheduling/lib/filter_validation_test.go b/internal/scheduling/lib/filter_validation_test.go
new file mode 100644
index 000000000..f5e4e9efe
--- /dev/null
+++ b/internal/scheduling/lib/filter_validation_test.go
@@ -0,0 +1,4 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
index 42a399e70..10935938a 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -73,6 +73,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
}
filter := makeFilter()
filter = monitorFilter(filter, filterConfig.Name, pipelineMonitor)
+ filter = validateFilter(filter)
if err := filter.Init(ctx, client, filterConfig); err != nil {
return PipelineInitResult[Pipeline[RequestType]]{
CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
index 8038bea9e..e56db31ba 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -13,11 +13,33 @@ func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
pipeline := &filterWeigherPipeline[mockPipelineRequest]{
filters: map[string]Filter[mockPipelineRequest]{
- "mock_filter": &mockFilter[mockPipelineRequest]{},
+ "mock_filter": &mockFilter[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ // Filter out host3
+ return &StepResult{
+ Activations: map[string]float64{
+ "host1": 0.0,
+ "host2": 0.0,
+ },
+ }, nil
+ },
+ },
},
filtersOrder: []string{"mock_filter"},
weighers: map[string]Weigher[mockPipelineRequest]{
- "mock_weigher": &mockWeigher[mockPipelineRequest]{},
+ "mock_weigher": &mockWeigher[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ // Assign weights to hosts
+ activations := map[string]float64{
+ "host1": 0.5,
+ "host2": 1.0,
+ "host3": -0.5,
+ }
+ return &StepResult{
+ Activations: activations,
+ }, nil
+ },
+ },
},
weighersOrder: []string{"mock_weigher"},
}
@@ -163,7 +185,17 @@ func TestPipeline_SortHostsByWeights(t *testing.T) {
}
func TestPipeline_RunFilters(t *testing.T) {
- mockStep := &mockFilter[mockPipelineRequest]{}
+ mockStep := &mockFilter[mockPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ // Filter out host3
+ return &StepResult{
+ Activations: map[string]float64{
+ "host1": 0.0,
+ "host2": 0.0,
+ },
+ }, nil
+ },
+ }
p := &filterWeigherPipeline[mockPipelineRequest]{
filtersOrder: []string{
"mock_filter",
diff --git a/internal/scheduling/lib/weigher_test.go b/internal/scheduling/lib/weigher_test.go
index 7cc74f3c9..70f8e4285 100644
--- a/internal/scheduling/lib/weigher_test.go
+++ b/internal/scheduling/lib/weigher_test.go
@@ -17,8 +17,14 @@ type mockWeigher[RequestType PipelineRequest] struct {
}
func (m *mockWeigher[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+ if m.InitFunc == nil {
+ return nil
+ }
return m.InitFunc(ctx, client, step)
}
func (m *mockWeigher[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+ if m.RunFunc == nil {
+ return &StepResult{}, nil
+ }
return m.RunFunc(traceLog, request)
}
From 4885dc9c1dbac855bbc75f09ef7956387fd41a3b Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 07:59:47 +0100
Subject: [PATCH 28/41] Rename descheduler/step -> descheduler/detector
---
.../nova/{step.go => detector.go} | 8 +-----
.../nova/{step_test.go => detector_test.go} | 0
.../scheduling/descheduling/nova/monitor.go | 4 +--
.../scheduling/descheduling/nova/pipeline.go | 9 ++++---
.../descheduling/nova/pipeline_controller.go | 2 +-
.../nova/pipeline_controller_test.go | 2 +-
.../descheduling/nova/pipeline_test.go | 27 ++++++++++---------
.../descheduling/nova/supported_steps.go | 2 +-
internal/scheduling/lib/errors.go | 13 +++++++++
internal/scheduling/lib/step.go | 6 -----
10 files changed, 38 insertions(+), 35 deletions(-)
rename internal/scheduling/descheduling/nova/{step.go => detector.go} (75%)
rename internal/scheduling/descheduling/nova/{step_test.go => detector_test.go} (100%)
create mode 100644 internal/scheduling/lib/errors.go
diff --git a/internal/scheduling/descheduling/nova/step.go b/internal/scheduling/descheduling/nova/detector.go
similarity index 75%
rename from internal/scheduling/descheduling/nova/step.go
rename to internal/scheduling/descheduling/nova/detector.go
index 552edf87b..f6e0e455c 100644
--- a/internal/scheduling/descheduling/nova/step.go
+++ b/internal/scheduling/descheduling/nova/detector.go
@@ -5,19 +5,13 @@ package nova
import (
"context"
- "errors"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-var (
- // This error is returned from the step at any time when the step should be skipped.
- ErrStepSkipped = errors.New("step skipped")
-)
-
-type Step interface {
+type Detector interface {
// Get the VMs on their current hosts that should be considered for descheduling.
Run() ([]plugins.Decision, error)
// Configure the step with a database and options.
diff --git a/internal/scheduling/descheduling/nova/step_test.go b/internal/scheduling/descheduling/nova/detector_test.go
similarity index 100%
rename from internal/scheduling/descheduling/nova/step_test.go
rename to internal/scheduling/descheduling/nova/detector_test.go
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/descheduling/nova/monitor.go
index 6fd248321..ae5daf87b 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/descheduling/nova/monitor.go
@@ -73,7 +73,7 @@ func (m *Monitor) Collect(ch chan<- prometheus.Metric) {
type StepMonitor struct {
// The step being monitored.
- step Step
+ step Detector
// The name of this step.
stepName string
// A timer to measure how long the step takes to run.
@@ -83,7 +83,7 @@ type StepMonitor struct {
}
// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Step, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
+func monitorStep(step Detector, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/descheduling/nova/pipeline.go
index aff8c95b3..60e1d8c93 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/descheduling/nova/pipeline.go
@@ -13,6 +13,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -28,18 +29,18 @@ type Pipeline struct {
// The order in which scheduler steps are applied, by their step name.
order []string
// The steps by their name.
- steps map[string]Step
+ steps map[string]Detector
}
func (p *Pipeline) Init(
ctx context.Context,
confedSteps []v1alpha1.DetectorSpec,
- supportedSteps map[string]Step,
+ supportedSteps map[string]Detector,
) (nonCriticalErr, criticalErr error) {
p.order = []string{}
// Load all steps from the configuration.
- p.steps = make(map[string]Step, len(confedSteps))
+ p.steps = make(map[string]Detector, len(confedSteps))
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
@@ -72,7 +73,7 @@ func (p *Pipeline) run() map[string][]plugins.Decision {
wg.Go(func() {
slog.Info("descheduler: running step")
decisions, err := step.Run()
- if errors.Is(err, ErrStepSkipped) {
+ if errors.Is(err, lib.ErrStepSkipped) {
slog.Info("descheduler: step skipped")
return
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index 1e10a4ad5..f1b575983 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -56,7 +56,7 @@ func (c *DeschedulingsPipelineController) InitPipeline(
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedSteps)
+ nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
return lib.PipelineInitResult[*Pipeline]{
Pipeline: pipeline,
NonCriticalErr: nonCriticalErr,
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index fe37883d1..f48924ac4 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -83,7 +83,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
CycleDetector: controller.CycleDetector,
Monitor: controller.Monitor,
}
- nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]Step{
+ nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]Detector{
"mock-step": &mockControllerStep{},
})
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
index d9797fa77..06058b312 100644
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_test.go
@@ -11,6 +11,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -41,14 +42,14 @@ func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step
func TestPipeline_Init(t *testing.T) {
tests := []struct {
name string
- supportedSteps map[string]Step
+ supportedSteps map[string]Detector
confedSteps []v1alpha1.DetectorSpec
expectedNonCriticalError bool
expectedCriticalError bool
}{
{
name: "successful initialization with single step",
- supportedSteps: map[string]Step{
+ supportedSteps: map[string]Detector{
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.DetectorSpec{{
@@ -59,7 +60,7 @@ func TestPipeline_Init(t *testing.T) {
},
{
name: "initialization with unsupported step",
- supportedSteps: map[string]Step{
+ supportedSteps: map[string]Detector{
"test-step": &mockPipelineStep{},
},
confedSteps: []v1alpha1.DetectorSpec{{
@@ -70,7 +71,7 @@ func TestPipeline_Init(t *testing.T) {
},
{
name: "initialization with step init error",
- supportedSteps: map[string]Step{
+ supportedSteps: map[string]Detector{
"failing-step": &mockPipelineStep{initError: errors.New("init failed")},
},
confedSteps: []v1alpha1.DetectorSpec{{
@@ -81,7 +82,7 @@ func TestPipeline_Init(t *testing.T) {
},
{
name: "initialization with multiple steps",
- supportedSteps: map[string]Step{
+ supportedSteps: map[string]Detector{
"step1": &mockPipelineStep{},
"step2": &mockPipelineStep{},
},
@@ -135,13 +136,13 @@ func TestPipeline_Init(t *testing.T) {
func TestPipeline_run(t *testing.T) {
tests := []struct {
name string
- steps map[string]Step
+ steps map[string]Detector
order []string
expectedResults map[string][]plugins.Decision
}{
{
name: "successful run with single step",
- steps: map[string]Step{
+ steps: map[string]Detector{
"test-step": &mockPipelineStep{
decisions: []plugins.Decision{
{VMID: "vm1", Reason: "test reason", Host: "host1"},
@@ -157,7 +158,7 @@ func TestPipeline_run(t *testing.T) {
},
{
name: "run with step error",
- steps: map[string]Step{
+ steps: map[string]Detector{
"failing-step": &mockPipelineStep{
runError: errors.New("step failed"),
},
@@ -167,9 +168,9 @@ func TestPipeline_run(t *testing.T) {
},
{
name: "run with step skipped",
- steps: map[string]Step{
+ steps: map[string]Detector{
"skipped-step": &mockPipelineStep{
- runError: ErrStepSkipped,
+ runError: lib.ErrStepSkipped,
},
},
order: []string{"skipped-step"},
@@ -177,7 +178,7 @@ func TestPipeline_run(t *testing.T) {
},
{
name: "run with multiple steps",
- steps: map[string]Step{
+ steps: map[string]Detector{
"step1": &mockPipelineStep{
decisions: []plugins.Decision{
{VMID: "vm1", Reason: "reason1", Host: "host1"},
@@ -318,14 +319,14 @@ func TestPipeline_combine(t *testing.T) {
func TestSupportedSteps(t *testing.T) {
// Test that SupportedSteps is properly initialized
- if len(supportedSteps) == 0 {
+ if len(supportedDetectors) == 0 {
t.Error("SupportedSteps should not be empty")
}
}
// Benchmark tests
func BenchmarkPipeline_run(b *testing.B) {
- steps := map[string]Step{
+ steps := map[string]Detector{
"step1": &mockPipelineStep{
decisions: []plugins.Decision{
{VMID: "vm1", Reason: "bench reason", Host: "host1"},
diff --git a/internal/scheduling/descheduling/nova/supported_steps.go b/internal/scheduling/descheduling/nova/supported_steps.go
index fc9fb0d29..f56562f50 100644
--- a/internal/scheduling/descheduling/nova/supported_steps.go
+++ b/internal/scheduling/descheduling/nova/supported_steps.go
@@ -7,6 +7,6 @@ import "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/p
// Configuration of steps supported by the descheduler.
// The steps actually used by the scheduler are defined through the configuration file.
-var supportedSteps = map[string]Step{
+var supportedDetectors = map[string]Detector{
"avoid_high_steal_pct": &kvm.AvoidHighStealPctStep{},
}
diff --git a/internal/scheduling/lib/errors.go b/internal/scheduling/lib/errors.go
new file mode 100644
index 000000000..59735f9a5
--- /dev/null
+++ b/internal/scheduling/lib/errors.go
@@ -0,0 +1,13 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "errors"
+)
+
+var (
+ // This error is returned from the step at any time when the step should be skipped.
+ ErrStepSkipped = errors.New("step skipped")
+)
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
index 2980b1e84..328e1eff9 100644
--- a/internal/scheduling/lib/step.go
+++ b/internal/scheduling/lib/step.go
@@ -4,15 +4,9 @@
package lib
import (
- "errors"
"log/slog"
)
-var (
- // This error is returned from the step at any time when the step should be skipped.
- ErrStepSkipped = errors.New("step skipped")
-)
-
// Steps can be chained together to form a scheduling pipeline.
type Step[RequestType PipelineRequest] interface {
// Run this step in the scheduling pipeline.
From 8b1159f8a0b6da8bcaad974a3287a1d334cf5153 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 09:37:23 +0100
Subject: [PATCH 29/41] Pull descheduler code into lib for reusability and
better alignment with api structure
---
api/delegation/cinder/messages.go | 2 +-
api/delegation/ironcore/messages.go | 2 +-
api/delegation/manila/messages.go | 2 +-
api/delegation/nova/messages.go | 2 +-
api/delegation/pods/messages.go | 2 +-
cmd/main.go | 2 +-
.../decisions/cinder/pipeline_controller.go | 6 +-
.../cinder/pipeline_controller_test.go | 14 +-
.../scheduling/decisions/machines/noop.go | 6 +-
.../decisions/machines/pipeline_controller.go | 6 +-
.../machines/pipeline_controller_test.go | 16 +-
.../decisions/manila/pipeline_controller.go | 6 +-
.../manila/pipeline_controller_test.go | 14 +-
.../weighers/netapp_cpu_usage_balancing.go | 2 +-
.../decisions/nova/pipeline_controller.go | 6 +-
.../nova/pipeline_controller_test.go | 14 +-
.../filters/filter_allowed_projects.go | 4 +-
.../plugins/filters/filter_capabilities.go | 4 +-
.../nova/plugins/filters/filter_correct_az.go | 4 +-
.../filters/filter_external_customer.go | 2 +-
.../filters/filter_has_accelerators.go | 4 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filters/filter_has_requested_traits.go | 4 +-
.../filters/filter_host_instructions.go | 4 +-
.../filters/filter_instance_group_affinity.go | 4 +-
.../filter_instance_group_anti_affinity.go | 4 +-
.../plugins/filters/filter_live_migratable.go | 4 +-
.../filters/filter_live_migratable_test.go | 12 +-
.../plugins/filters/filter_maintenance.go | 4 +-
.../filters/filter_packed_virtqueue.go | 4 +-
.../filters/filter_requested_destination.go | 4 +-
.../filter_requested_destination_test.go | 8 +-
.../filters/filter_status_conditions.go | 4 +-
.../vmware_anti_affinity_noisy_projects.go | 2 +-
.../vmware_avoid_long_term_contended_hosts.go | 2 +-
...vmware_avoid_short_term_contended_hosts.go | 2 +-
.../vmware_general_purpose_balancing.go | 2 +-
.../weighers/vmware_hana_binpacking.go | 2 +-
.../decisions/pods/pipeline_controller.go | 6 +-
.../pods/pipeline_controller_test.go | 16 +-
.../plugins/filters/filter_node_affinity.go | 6 +-
.../plugins/filters/filter_node_available.go | 6 +-
.../plugins/filters/filter_node_capacity.go | 6 +-
.../pods/plugins/filters/filter_noop.go | 6 +-
.../pods/plugins/filters/filter_taint.go | 6 +-
.../pods/plugins/weighers/binpack.go | 2 +-
.../descheduling/nova/cycle_detector.go | 14 +-
.../descheduling/nova/cycle_detector_test.go | 54 +--
.../scheduling/descheduling/nova/detector.go | 19 -
.../descheduling/nova/detector_test.go | 58 ---
.../scheduling/descheduling/nova/executor.go | 4 +-
.../descheduling/nova/executor_test.go | 6 +-
.../descheduling/nova/pipeline_controller.go | 56 ++-
.../nova/pipeline_controller_test.go | 12 +-
.../descheduling/nova/pipeline_test.go | 371 ------------------
.../nova/plugins/kvm/avoid_high_steal_pct.go | 11 +-
.../plugins/kvm/avoid_high_steal_pct_test.go | 66 ++--
.../descheduling/nova/plugins/vm_detection.go | 20 +
.../descheduling/nova/supported_steps.go | 8 +-
internal/scheduling/lib/base_filter.go | 22 --
internal/scheduling/lib/base_step.go | 56 ---
internal/scheduling/lib/base_weigher.go | 50 ---
internal/scheduling/lib/cycle_detector.go | 18 +
.../nova/plugins/base.go => lib/detector.go} | 36 +-
.../monitor.go => lib/detector_monitor.go} | 43 +-
.../detector_monitor_test.go} | 59 ++-
.../pipeline.go => lib/detector_pipeline.go} | 104 ++---
.../scheduling/lib/detector_pipeline_test.go | 4 +
.../base_test.go => lib/detector_test.go} | 19 +-
internal/scheduling/lib/filter.go | 15 +-
internal/scheduling/lib/filter_monitor.go | 10 +-
internal/scheduling/lib/filter_test.go | 8 +-
internal/scheduling/lib/filter_validation.go | 6 +-
.../scheduling/lib/filter_weigher_pipeline.go | 23 +-
....go => filter_weigher_pipeline_monitor.go} | 14 +-
...> filter_weigher_pipeline_monitor_test.go} | 0
....go => filter_weigher_pipeline_request.go} | 4 +-
.../filter_weigher_pipeline_request_test.go | 29 ++
.../lib/filter_weigher_pipeline_step.go | 77 ++++
...> filter_weigher_pipeline_step_monitor.go} | 12 +-
...ter_weigher_pipeline_step_monitor_test.go} | 10 +-
...o => filter_weigher_pipeline_step_opts.go} | 6 +-
...filter_weigher_pipeline_step_opts_test.go} | 0
...=> filter_weigher_pipeline_step_result.go} | 6 +-
...o => filter_weigher_pipeline_step_test.go} | 0
.../lib/filter_weigher_pipeline_test.go | 47 ++-
internal/scheduling/lib/pipeline.go | 13 -
...e_controller.go => pipeline_controller.go} | 0
...er_test.go => pipeline_controller_test.go} | 0
.../scheduling/lib/pipeline_request_test.go | 29 --
internal/scheduling/lib/pipeline_test.go | 9 -
internal/scheduling/lib/step.go | 28 --
internal/scheduling/lib/weigher.go | 43 +-
internal/scheduling/lib/weigher_monitor.go | 10 +-
internal/scheduling/lib/weigher_test.go | 8 +-
internal/scheduling/lib/weigher_validation.go | 6 +-
.../scheduling/lib/weigher_validation_test.go | 20 +-
97 files changed, 684 insertions(+), 1101 deletions(-)
delete mode 100644 internal/scheduling/descheduling/nova/detector.go
delete mode 100644 internal/scheduling/descheduling/nova/detector_test.go
delete mode 100644 internal/scheduling/descheduling/nova/pipeline_test.go
create mode 100644 internal/scheduling/descheduling/nova/plugins/vm_detection.go
delete mode 100644 internal/scheduling/lib/base_filter.go
delete mode 100644 internal/scheduling/lib/base_step.go
delete mode 100644 internal/scheduling/lib/base_weigher.go
create mode 100644 internal/scheduling/lib/cycle_detector.go
rename internal/scheduling/{descheduling/nova/plugins/base.go => lib/detector.go} (63%)
rename internal/scheduling/{descheduling/nova/monitor.go => lib/detector_monitor.go} (76%)
rename internal/scheduling/{descheduling/nova/monitor_test.go => lib/detector_monitor_test.go} (80%)
rename internal/scheduling/{descheduling/nova/pipeline.go => lib/detector_pipeline.go} (50%)
create mode 100644 internal/scheduling/lib/detector_pipeline_test.go
rename internal/scheduling/{descheduling/nova/plugins/base_test.go => lib/detector_test.go} (60%)
rename internal/scheduling/lib/{pipeline_monitor.go => filter_weigher_pipeline_monitor.go} (91%)
rename internal/scheduling/lib/{pipeline_monitor_test.go => filter_weigher_pipeline_monitor_test.go} (100%)
rename internal/scheduling/lib/{pipeline_request.go => filter_weigher_pipeline_request.go} (84%)
create mode 100644 internal/scheduling/lib/filter_weigher_pipeline_request_test.go
create mode 100644 internal/scheduling/lib/filter_weigher_pipeline_step.go
rename internal/scheduling/lib/{step_monitor.go => filter_weigher_pipeline_step_monitor.go} (94%)
rename internal/scheduling/lib/{step_monitor_test.go => filter_weigher_pipeline_step_monitor_test.go} (92%)
rename internal/scheduling/lib/{step_opts.go => filter_weigher_pipeline_step_opts.go} (61%)
rename internal/scheduling/lib/{step_opts_test.go => filter_weigher_pipeline_step_opts_test.go} (100%)
rename internal/scheduling/lib/{result.go => filter_weigher_pipeline_step_result.go} (82%)
rename internal/scheduling/lib/{base_step_test.go => filter_weigher_pipeline_step_test.go} (100%)
delete mode 100644 internal/scheduling/lib/pipeline.go
rename internal/scheduling/lib/{base_pipeline_controller.go => pipeline_controller.go} (100%)
rename internal/scheduling/lib/{base_pipeline_controller_test.go => pipeline_controller_test.go} (100%)
delete mode 100644 internal/scheduling/lib/pipeline_request_test.go
delete mode 100644 internal/scheduling/lib/pipeline_test.go
delete mode 100644 internal/scheduling/lib/step.go
diff --git a/api/delegation/cinder/messages.go b/api/delegation/cinder/messages.go
index 5c9b0225f..cc4a5b5dc 100644
--- a/api/delegation/cinder/messages.go
+++ b/api/delegation/cinder/messages.go
@@ -50,7 +50,7 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
-func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.FilterWeigherPipelineRequest {
filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
for _, host := range r.Hosts {
if _, exists := includedSubjects[host.VolumeHost]; exists {
diff --git a/api/delegation/ironcore/messages.go b/api/delegation/ironcore/messages.go
index 61d90b097..c346f20e8 100644
--- a/api/delegation/ironcore/messages.go
+++ b/api/delegation/ironcore/messages.go
@@ -32,7 +32,7 @@ func (r MachinePipelineRequest) GetWeights() map[string]float64 {
func (r MachinePipelineRequest) GetTraceLogArgs() []slog.Attr {
return []slog.Attr{}
}
-func (r MachinePipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+func (r MachinePipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.FilterWeigherPipelineRequest {
filteredPools := make([]ironcorev1alpha1.MachinePool, 0, len(includedSubjects))
for _, pool := range r.Pools {
if _, exists := includedSubjects[pool.Name]; exists {
diff --git a/api/delegation/manila/messages.go b/api/delegation/manila/messages.go
index 9b8afbaed..c21a701ab 100644
--- a/api/delegation/manila/messages.go
+++ b/api/delegation/manila/messages.go
@@ -50,7 +50,7 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
-func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.FilterWeigherPipelineRequest {
filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
for _, host := range r.Hosts {
if _, exists := includedSubjects[host.ShareHost]; exists {
diff --git a/api/delegation/nova/messages.go b/api/delegation/nova/messages.go
index 117071a8f..8e9a097e2 100644
--- a/api/delegation/nova/messages.go
+++ b/api/delegation/nova/messages.go
@@ -71,7 +71,7 @@ func (r ExternalSchedulerRequest) GetTraceLogArgs() []slog.Attr {
slog.String("project", r.Context.ProjectID),
}
}
-func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+func (r ExternalSchedulerRequest) FilterSubjects(includedSubjects map[string]float64) lib.FilterWeigherPipelineRequest {
filteredHosts := make([]ExternalSchedulerHost, 0, len(includedSubjects))
for _, host := range r.Hosts {
if _, exists := includedSubjects[host.ComputeHost]; exists {
diff --git a/api/delegation/pods/messages.go b/api/delegation/pods/messages.go
index c1ae8fe5c..862aa7a40 100644
--- a/api/delegation/pods/messages.go
+++ b/api/delegation/pods/messages.go
@@ -34,7 +34,7 @@ func (r PodPipelineRequest) GetWeights() map[string]float64 {
func (r PodPipelineRequest) GetTraceLogArgs() []slog.Attr {
return []slog.Attr{}
}
-func (r PodPipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.PipelineRequest {
+func (r PodPipelineRequest) FilterSubjects(includedSubjects map[string]float64) lib.FilterWeigherPipelineRequest {
filteredNodes := make([]corev1.Node, 0, len(includedSubjects))
for _, node := range r.Nodes {
if _, exists := includedSubjects[node.Name]; exists {
diff --git a/cmd/main.go b/cmd/main.go
index 61930393d..304424520 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -309,7 +309,7 @@ func main() {
}
if slices.Contains(config.EnabledControllers, "nova-deschedulings-pipeline-controller") {
// Deschedulings controller
- monitor := deschedulingnova.NewPipelineMonitor()
+ monitor := schedulinglib.NewDetectorPipelineMonitor()
metrics.Registry.MustRegister(&monitor)
deschedulingsController := &deschedulingnova.DeschedulingsPipelineController{
Monitor: monitor,
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller.go b/internal/scheduling/decisions/cinder/pipeline_controller.go
index 279cb3a68..a75989347 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller.go
@@ -35,13 +35,13 @@ import (
// reconfigure the pipelines as needed.
type DecisionPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]
+ lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
// Mutex to only allow one process at a time
processMu sync.Mutex
// Monitor to pass down to all pipelines.
- Monitor lib.PipelineMonitor
+ Monitor lib.FilterWeigherPipelineMonitor
// Config for the scheduling operator.
Conf conf.Config
}
@@ -144,7 +144,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
+) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index b5b1daf66..cfc42c8bf 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -157,11 +157,11 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
},
@@ -397,12 +397,12 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainCinder,
},
@@ -471,7 +471,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
func TestDecisionPipelineController_InitPipeline(t *testing.T) {
controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
tests := []struct {
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/decisions/machines/noop.go
index eeec4f848..ce4d52226 100644
--- a/internal/scheduling/decisions/machines/noop.go
+++ b/internal/scheduling/decisions/machines/noop.go
@@ -24,12 +24,12 @@ func (f *NoopFilter) Init(ctx context.Context, client client.Client, filter v1al
// not in the map are considered as filtered out.
// Provide a traceLog that contains the global request id and should
// be used to log the step's execution.
-func (NoopFilter) Run(traceLog *slog.Logger, request ironcore.MachinePipelineRequest) (*lib.StepResult, error) {
+func (NoopFilter) Run(traceLog *slog.Logger, request ironcore.MachinePipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64, len(request.Pools))
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
// Usually you would do some filtering here, or adjust the weights.
for _, pool := range request.Pools {
activations[pool.Name] = 1.0
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller.go b/internal/scheduling/decisions/machines/pipeline_controller.go
index 5116ec097..1e6bc712f 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller.go
@@ -39,7 +39,7 @@ import (
// reconfigure the pipelines as needed.
type DecisionPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[lib.Pipeline[ironcore.MachinePipelineRequest]]
+ lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]
// Mutex to only allow one process at a time
processMu sync.Mutex
@@ -47,7 +47,7 @@ type DecisionPipelineController struct {
// Config for the scheduling operator.
Conf conf.Config
// Monitor to pass down to all pipelines.
- Monitor lib.PipelineMonitor
+ Monitor lib.FilterWeigherPipelineMonitor
}
// The type of pipeline this controller manages.
@@ -186,7 +186,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[lib.Pipeline[ironcore.MachinePipelineRequest]] {
+) lib.PipelineInitResult[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]] {
return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index a12ade07d..dc35f30d6 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -121,15 +121,15 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[ironcore.MachinePipelineRequest]]{
- Pipelines: map[string]lib.Pipeline[ironcore.MachinePipelineRequest]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]{
+ Pipelines: map[string]lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]{
"machines-scheduler": createMockPipeline(),
},
},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client
@@ -206,7 +206,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
func TestDecisionPipelineController_InitPipeline(t *testing.T) {
controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
tests := []struct {
@@ -427,14 +427,14 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[ironcore.MachinePipelineRequest]]{
- Pipelines: map[string]lib.Pipeline[ironcore.MachinePipelineRequest]{},
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]{
+ Pipelines: map[string]lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]{},
PipelineConfigs: map[string]v1alpha1.Pipeline{},
},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainMachines,
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client
@@ -548,7 +548,7 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
}
// Helper function to create a mock pipeline that works with the ironcore types
-func createMockPipeline() lib.Pipeline[ironcore.MachinePipelineRequest] {
+func createMockPipeline() lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest] {
return &mockMachinePipeline{}
}
diff --git a/internal/scheduling/decisions/manila/pipeline_controller.go b/internal/scheduling/decisions/manila/pipeline_controller.go
index 9f566e8f1..dde661c89 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller.go
@@ -35,13 +35,13 @@ import (
// reconfigure the pipelines as needed.
type DecisionPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]
+ lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
// Mutex to only allow one process at a time
processMu sync.Mutex
// Monitor to pass down to all pipelines.
- Monitor lib.PipelineMonitor
+ Monitor lib.FilterWeigherPipelineMonitor
// Config for the scheduling operator.
Conf conf.Config
}
@@ -144,7 +144,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
+) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/decisions/manila/pipeline_controller_test.go
index 9b0f790e1..c9cccb041 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/manila/pipeline_controller_test.go
@@ -159,11 +159,11 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainManila,
},
@@ -394,12 +394,12 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainManila,
},
@@ -549,7 +549,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client // Through basepipelinecontroller
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
index 11e9cca46..5eb9474b5 100644
--- a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
+++ b/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
@@ -61,7 +61,7 @@ func (s *NetappCPUUsageBalancingStep) Init(ctx context.Context, client client.Cl
}
// Downvote hosts that are highly contended.
-func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *NetappCPUUsageBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
result.Statistics["avg cpu contention"] = s.PrepareStats(request, "%")
result.Statistics["max cpu contention"] = s.PrepareStats(request, "%")
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go
index fcdcbbd5c..17a876302 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller.go
@@ -36,13 +36,13 @@ import (
// reconfigure the pipelines as needed.
type DecisionPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]
+ lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
// Mutex to only allow one process at a time
processMu sync.Mutex
// Monitor to pass down to all pipelines.
- Monitor lib.PipelineMonitor
+ Monitor lib.FilterWeigherPipelineMonitor
// Config for the scheduling operator.
Conf conf.Config
}
@@ -151,7 +151,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[lib.Pipeline[api.ExternalSchedulerRequest]] {
+) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go
index 21d8b0a66..a0327e36d 100644
--- a/internal/scheduling/decisions/nova/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go
@@ -196,11 +196,11 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
},
@@ -259,7 +259,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
func TestDecisionPipelineController_InitPipeline(t *testing.T) {
controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
tests := []struct {
@@ -663,12 +663,12 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[api.ExternalSchedulerRequest]]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
- Pipelines: make(map[string]lib.Pipeline[api.ExternalSchedulerRequest]),
+ Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
PipelineConfigs: make(map[string]v1alpha1.Pipeline),
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
},
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
index 96815c618..16c75d5d7 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
@@ -14,12 +14,12 @@ import (
)
type FilterAllowedProjectsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Lock certain hosts for certain projects, based on the hypervisor spec.
// Note that hosts without specified projects are still accessible.
-func (s *FilterAllowedProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterAllowedProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.ProjectID == "" {
traceLog.Info("no project ID in request, skipping filter")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
index 80dfa5b3c..0366df41f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
@@ -15,7 +15,7 @@ import (
)
type FilterCapabilitiesStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Get the provided capabilities of a hypervisor resource in the format Nova expects.
@@ -44,7 +44,7 @@ func hvToNovaCapabilities(hv hv1.Hypervisor) (map[string]string, error) {
// Check the capabilities of each host and if they match the extra spec provided
// in the request spec flavor.
-func (s *FilterCapabilitiesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterCapabilitiesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
requestedCapabilities := request.Spec.Data.Flavor.Data.ExtraSpecs
if len(requestedCapabilities) == 0 {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
index dfcdc9f4b..f91b57d9a 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
@@ -13,11 +13,11 @@ import (
)
type FilterCorrectAZStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Only get hosts in the requested az.
-func (s *FilterCorrectAZStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterCorrectAZStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.AvailabilityZone == "" {
traceLog.Info("no availability zone requested, skipping filter_correct_az step")
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
index b995be916..49d9de046 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
@@ -33,7 +33,7 @@ type FilterExternalCustomerStep struct {
// Prefix-match the domain name for external customer domains and filter out hosts
// that are not intended for external customers.
-func (s *FilterExternalCustomerStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterExternalCustomerStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
domainName, err := request.Spec.Data.GetSchedulerHintStr("domain_name")
if err != nil {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
index 0a5b1339f..1e3bdb726 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
@@ -14,11 +14,11 @@ import (
)
type FilterHasAcceleratorsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// If requested, only get hosts with accelerators.
-func (s *FilterHasAcceleratorsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterHasAcceleratorsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
extraSpecs := request.Spec.Data.Flavor.Data.ExtraSpecs
if _, ok := extraSpecs["accel:device_profile"]; !ok {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
index 4b07ef56c..7bd12c69f 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
@@ -40,7 +40,7 @@ type FilterHasEnoughCapacity struct {
// known at this point.
//
// Please also note that disk space is currently not considered by this filter.
-func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
// This map holds the free resources per host.
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
index 35367dff3..05095ac67 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
@@ -15,13 +15,13 @@ import (
)
type FilterHasRequestedTraits struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Filter hosts that do not have the requested traits given by the extra spec:
// - "trait:": "forbidden" means the host must not have the specified trait.
// - "trait:": "required" means the host must have the specified trait.
-func (s *FilterHasRequestedTraits) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterHasRequestedTraits) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
var requiredTraits, forbiddenTraits []string
for key, value := range request.Spec.Data.Flavor.Data.ExtraSpecs {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
index cd57e2e4d..42562d244 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
@@ -12,13 +12,13 @@ import (
)
type FilterHostInstructionsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Filter hosts based on instructions given in the request spec. Supported are:
// - spec.ignore_hosts: Filter out all hosts in this list.
// - spec.force_hosts: Include only hosts in this list.
-func (s *FilterHostInstructionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterHostInstructionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if request.Spec.Data.IgnoreHosts != nil {
for _, host := range *request.Spec.Data.IgnoreHosts {
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
index fb42e7c19..ec5569d23 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
@@ -12,14 +12,14 @@ import (
)
type FilterInstanceGroupAffinityStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Select hosts in spec.instance_group.
func (s *FilterInstanceGroupAffinityStep) Run(
traceLog *slog.Logger,
request api.ExternalSchedulerRequest,
-) (*lib.StepResult, error) {
+) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
index e9390d9c3..bdd6c0910 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
@@ -14,7 +14,7 @@ import (
)
type FilterInstanceGroupAntiAffinityStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Select hosts not in spec_obj.instance_group but only until
@@ -22,7 +22,7 @@ type FilterInstanceGroupAntiAffinityStep struct {
func (s *FilterInstanceGroupAntiAffinityStep) Run(
traceLog *slog.Logger,
request api.ExternalSchedulerRequest,
-) (*lib.StepResult, error) {
+) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
index f31e72516..4ae1a2365 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
@@ -15,7 +15,7 @@ import (
)
type FilterLiveMigratableStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Check if the encountered request spec is a live migration.
@@ -64,7 +64,7 @@ func (s *FilterLiveMigratableStep) checkHasSufficientFeatures(
func (s *FilterLiveMigratableStep) Run(
traceLog *slog.Logger,
request api.ExternalSchedulerRequest,
-) (*lib.StepResult, error) {
+) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
index b987e9c15..673084901 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
@@ -727,8 +727,8 @@ func TestFilterLiveMigratableStep_Run(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
+ BaseFilterWeigherPipelineStep: lib.BaseFilterWeigherPipelineStep[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
Client: fakeClient,
},
},
@@ -814,8 +814,8 @@ func TestFilterLiveMigratableStep_Run_SourceHostNotFound(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
+ BaseFilterWeigherPipelineStep: lib.BaseFilterWeigherPipelineStep[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
Client: fakeClient,
},
},
@@ -860,8 +860,8 @@ func TestFilterLiveMigratableStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterLiveMigratableStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
+ BaseFilterWeigherPipelineStep: lib.BaseFilterWeigherPipelineStep[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
Client: fakeClient,
},
},
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
index a8d386c4d..de81adefc 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
@@ -13,11 +13,11 @@ import (
)
type FilterMaintenanceStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Check that the maintenance spec of the hypervisor doesn't prevent scheduling.
-func (s *FilterMaintenanceStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterMaintenanceStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
hvs := &hv1.HypervisorList{}
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
index bb443ef57..248ebe6a8 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
@@ -14,11 +14,11 @@ import (
)
type FilterPackedVirtqueueStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// If requested, only get hosts with packed virtqueues.
-func (s *FilterPackedVirtqueueStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterPackedVirtqueueStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
// We don't care about the value.
_, reqInSpecs := request.Spec.Data.Flavor.Data.ExtraSpecs["hw:virtio_packed_ring"]
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
index c9f0319fb..55e542c32 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
@@ -14,7 +14,7 @@ import (
)
type FilterRequestedDestinationStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// If `requested_destination` is set in the request spec, filter hosts
@@ -23,7 +23,7 @@ type FilterRequestedDestinationStep struct {
func (s *FilterRequestedDestinationStep) Run(
traceLog *slog.Logger,
request api.ExternalSchedulerRequest,
-) (*lib.StepResult, error) {
+) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
index 5952e4c3f..3a5f63d0e 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
@@ -494,8 +494,8 @@ func TestFilterRequestedDestinationStep_Run(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
+ BaseFilterWeigherPipelineStep: lib.BaseFilterWeigherPipelineStep[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
Client: fakeClient,
},
},
@@ -577,8 +577,8 @@ func TestFilterRequestedDestinationStep_Run_ClientError(t *testing.T) {
Build()
step := &FilterRequestedDestinationStep{
- BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
- BaseStep: lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts]{
+ BaseFilter: lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
+ BaseFilterWeigherPipelineStep: lib.BaseFilterWeigherPipelineStep[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]{
Client: fakeClient,
},
},
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
index 870aaa58b..daa1a5249 100644
--- a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
+++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
@@ -15,12 +15,12 @@ import (
)
type FilterStatusConditionsStep struct {
- lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyStepOpts]
+ lib.BaseFilter[api.ExternalSchedulerRequest, lib.EmptyFilterWeigherPipelineStepOpts]
}
// Check that all status conditions meet the expected values, for example,
// that the hypervisor is ready and not disabled.
-func (s *FilterStatusConditionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *FilterStatusConditionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
hvs := &hv1.HypervisorList{}
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
index 43086ef97..181cada8b 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
@@ -52,7 +52,7 @@ func (s *VMwareAntiAffinityNoisyProjectsStep) Init(ctx context.Context, client c
}
// Downvote the hosts a project is currently running on if it's noisy.
-func (s *VMwareAntiAffinityNoisyProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *VMwareAntiAffinityNoisyProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
index eff4be46a..e58679f55 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
@@ -61,7 +61,7 @@ func (s *VMwareAvoidLongTermContendedHostsStep) Init(ctx context.Context, client
}
// Downvote hosts that are highly contended.
-func (s *VMwareAvoidLongTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *VMwareAvoidLongTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
index aca0fd2a7..21a55ced6 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
@@ -61,7 +61,7 @@ func (s *VMwareAvoidShortTermContendedHostsStep) Init(ctx context.Context, clien
}
// Downvote hosts that are highly contended.
-func (s *VMwareAvoidShortTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *VMwareAvoidShortTermContendedHostsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
if !request.VMware {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
index 1ed84a140..db2086a86 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
@@ -54,7 +54,7 @@ func (s *VMwareGeneralPurposeBalancingStep) Init(ctx context.Context, client cli
}
// Pack VMs on hosts based on their flavor.
-func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *VMwareGeneralPurposeBalancingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
// Don't execute the step for non-hana flavors.
if strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
index 62a91de43..704d66ba6 100644
--- a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
+++ b/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
@@ -54,7 +54,7 @@ func (s *VMwareHanaBinpackingStep) Init(ctx context.Context, client client.Clien
}
// Pack VMs on hosts based on their flavor.
-func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+func (s *VMwareHanaBinpackingStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
// Don't execute the step for non-hana flavors.
if !strings.Contains(request.Spec.Data.Flavor.Data.Name, "hana") {
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index 7f59415d6..0835cdfbc 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -38,7 +38,7 @@ import (
// reconfigure the pipelines as needed.
type DecisionPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[lib.Pipeline[pods.PodPipelineRequest]]
+ lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]
// Mutex to only allow one process at a time
processMu sync.Mutex
@@ -46,7 +46,7 @@ type DecisionPipelineController struct {
// Config for the scheduling operator.
Conf conf.Config
// Monitor to pass down to all pipelines.
- Monitor lib.PipelineMonitor
+ Monitor lib.FilterWeigherPipelineMonitor
}
// The type of pipeline this controller manages.
@@ -197,7 +197,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
func (c *DecisionPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[lib.Pipeline[pods.PodPipelineRequest]] {
+) lib.PipelineInitResult[lib.FilterWeigherPipeline[pods.PodPipelineRequest]] {
return lib.InitNewFilterWeigherPipeline(
ctx, c.Client, p.Name,
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 7a0720bcc..6a2253e38 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -118,15 +118,15 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[pods.PodPipelineRequest]]{
- Pipelines: map[string]lib.Pipeline[pods.PodPipelineRequest]{
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]{
+ Pipelines: map[string]lib.FilterWeigherPipeline[pods.PodPipelineRequest]{
"pods-scheduler": createMockPodPipeline(),
},
},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainPods,
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client
@@ -181,7 +181,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
func TestDecisionPipelineController_InitPipeline(t *testing.T) {
controller := &DecisionPipelineController{
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
tests := []struct {
@@ -407,14 +407,14 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
Build()
controller := &DecisionPipelineController{
- BasePipelineController: lib.BasePipelineController[lib.Pipeline[pods.PodPipelineRequest]]{
- Pipelines: map[string]lib.Pipeline[pods.PodPipelineRequest]{},
+ BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]{
+ Pipelines: map[string]lib.FilterWeigherPipeline[pods.PodPipelineRequest]{},
PipelineConfigs: map[string]v1alpha1.Pipeline{},
},
Conf: conf.Config{
SchedulingDomain: v1alpha1.SchedulingDomainPods,
},
- Monitor: lib.PipelineMonitor{},
+ Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client
@@ -526,7 +526,7 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
}
// Helper function to create a mock pipeline that works with the pod types
-func createMockPodPipeline() lib.Pipeline[pods.PodPipelineRequest] {
+func createMockPodPipeline() lib.FilterWeigherPipeline[pods.PodPipelineRequest] {
return &mockPodPipeline{}
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
index acacc6ea6..c378fe6c5 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
@@ -23,9 +23,9 @@ func (f *NodeAffinityFilter) Init(ctx context.Context, client client.Client, ste
return nil
}
-func (NodeAffinityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.StepResult, error) {
+func (NodeAffinityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64)
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
for _, node := range request.Nodes {
if matchesNodeAffinity(node, request.Pod) {
@@ -33,7 +33,7 @@ func (NodeAffinityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineReq
}
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
func matchesNodeAffinity(node corev1.Node, pod corev1.Pod) bool {
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
index c668e5f0a..02aca3896 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
@@ -22,9 +22,9 @@ func (f *NodeAvailableFilter) Init(ctx context.Context, client client.Client, st
return nil
}
-func (NodeAvailableFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.StepResult, error) {
+func (NodeAvailableFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64)
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
for _, node := range request.Nodes {
if isNodeHealthy(node) && isNodeSchedulable(node) {
@@ -32,7 +32,7 @@ func (NodeAvailableFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRe
}
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
func isNodeHealthy(node corev1.Node) bool {
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
index 70e897b6a..2e412f593 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
@@ -23,9 +23,9 @@ func (f *NodeCapacityFilter) Init(ctx context.Context, client client.Client, ste
return nil
}
-func (NodeCapacityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.StepResult, error) {
+func (NodeCapacityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64)
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
podRequests := helpers.GetPodResourceRequests(request.Pod)
@@ -35,7 +35,7 @@ func (NodeCapacityFilter) Run(traceLog *slog.Logger, request pods.PodPipelineReq
}
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
func hasCapacityForPod(node corev1.Node, podRequests corev1.ResourceList) bool {
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
index 08fbf1cd4..006c2d868 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
@@ -27,12 +27,12 @@ func (f *NoopFilter) Init(ctx context.Context, client client.Client, step v1alph
// not in the map are considered as filtered out.
// Provide a traceLog that contains the global request id and should
// be used to log the step's execution.
-func (NoopFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.StepResult, error) {
+func (NoopFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64, len(request.Nodes))
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
// Usually you would do some filtering here, or adjust the weights.
for _, node := range request.Nodes {
activations[node.Name] = 0.0
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
index 697c41466..d02af1849 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
+++ b/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
@@ -22,9 +22,9 @@ func (f *TaintFilter) Init(ctx context.Context, client client.Client, step v1alp
return nil
}
-func (TaintFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.StepResult, error) {
+func (TaintFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
activations := make(map[string]float64)
- stats := make(map[string]lib.StepStatistics)
+ stats := make(map[string]lib.FilterWeigherPipelineStepStatistics)
for _, node := range request.Nodes {
if canScheduleOnNode(node, request.Pod) {
@@ -32,7 +32,7 @@ func (TaintFilter) Run(traceLog *slog.Logger, request pods.PodPipelineRequest) (
}
}
- return &lib.StepResult{Activations: activations, Statistics: stats}, nil
+ return &lib.FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}, nil
}
func canScheduleOnNode(node corev1.Node, pod corev1.Pod) bool {
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
index 65ca207ab..07bd7d904 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
@@ -31,7 +31,7 @@ type BinpackingStep struct {
lib.BaseWeigher[api.PodPipelineRequest, BinpackingStepOpts]
}
-func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.StepResult, error) {
+func (s *BinpackingStep) Run(traceLog *slog.Logger, request api.PodPipelineRequest) (*lib.FilterWeigherPipelineStepResult, error) {
result := s.IncludeAllHostsFromRequest(request)
podResources := helpers.GetPodResourceRequests(request.Pod)
diff --git a/internal/scheduling/descheduling/nova/cycle_detector.go b/internal/scheduling/descheduling/nova/cycle_detector.go
index 1b501bddc..7bb7405b0 100644
--- a/internal/scheduling/descheduling/nova/cycle_detector.go
+++ b/internal/scheduling/descheduling/nova/cycle_detector.go
@@ -7,23 +7,17 @@ import (
"context"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type CycleDetector interface {
- // Initialize the cycle detector with needed clients.
- Init(ctx context.Context, client client.Client, conf conf.Config) error
- // Filter descheduling decisions to avoid cycles.
- Filter(ctx context.Context, decisions []plugins.Decision) ([]plugins.Decision, error)
-}
-
type cycleDetector struct {
// Nova API to get needed information for cycle detection.
novaAPI NovaAPI
}
-func NewCycleDetector() CycleDetector {
+func NewCycleDetector() lib.CycleDetector[plugins.VMDetection] {
return &cycleDetector{novaAPI: NewNovaAPI()}
}
@@ -32,7 +26,7 @@ func (c *cycleDetector) Init(ctx context.Context, client client.Client, conf con
return c.novaAPI.Init(ctx, client, conf)
}
-func (c *cycleDetector) Filter(ctx context.Context, decisions []plugins.Decision) ([]plugins.Decision, error) {
+func (c *cycleDetector) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
keep := make(map[string]struct{}, len(decisions))
for _, decision := range decisions {
// Get the migrations for the VM.
@@ -59,7 +53,7 @@ func (c *cycleDetector) Filter(ctx context.Context, decisions []plugins.Decision
keep[decision.VMID] = struct{}{}
}
}
- var output []plugins.Decision
+ var output []plugins.VMDetection
for _, decision := range decisions {
if _, ok := keep[decision.VMID]; ok {
output = append(output, decision)
diff --git a/internal/scheduling/descheduling/nova/cycle_detector_test.go b/internal/scheduling/descheduling/nova/cycle_detector_test.go
index ca343c6b6..c784436c1 100644
--- a/internal/scheduling/descheduling/nova/cycle_detector_test.go
+++ b/internal/scheduling/descheduling/nova/cycle_detector_test.go
@@ -43,14 +43,14 @@ func (m *mockCycleDetectorNovaAPI) GetServerMigrations(ctx context.Context, id s
func TestCycleDetector_Filter(t *testing.T) {
tests := []struct {
name string
- decisions []plugins.Decision
+ decisions []plugins.VMDetection
migrations map[string][]migration
- expected []plugins.Decision
+ expected []plugins.VMDetection
expectErr bool
}{
{
name: "no cycles - all decisions pass through",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
{VMID: "vm-2", Reason: "high memory", Host: "host-b"},
},
@@ -62,14 +62,14 @@ func TestCycleDetector_Filter(t *testing.T) {
{SourceCompute: "host-b", DestCompute: "host-c"},
},
},
- expected: []plugins.Decision{
+ expected: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
{VMID: "vm-2", Reason: "high memory", Host: "host-b"},
},
},
{
name: "simple cycle detected - decision filtered out",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{
@@ -78,11 +78,11 @@ func TestCycleDetector_Filter(t *testing.T) {
{SourceCompute: "host-b", DestCompute: "host-a"}, // Cycle back to host-a
},
},
- expected: []plugins.Decision{}, // Filtered out due to cycle
+ expected: []plugins.VMDetection{}, // Filtered out due to cycle
},
{
name: "three-hop cycle detected",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{
@@ -92,11 +92,11 @@ func TestCycleDetector_Filter(t *testing.T) {
{SourceCompute: "host-c", DestCompute: "host-a"}, // Cycle back to host-a
},
},
- expected: []plugins.Decision{}, // Filtered out due to cycle
+ expected: []plugins.VMDetection{}, // Filtered out due to cycle
},
{
name: "mixed scenarios - some cycles, some not",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"}, // Has cycle
{VMID: "vm-2", Reason: "high memory", Host: "host-x"}, // No cycle
{VMID: "vm-3", Reason: "high disk", Host: "host-y"}, // No migrations
@@ -112,14 +112,14 @@ func TestCycleDetector_Filter(t *testing.T) {
},
"vm-3": {}, // No migrations
},
- expected: []plugins.Decision{
+ expected: []plugins.VMDetection{
{VMID: "vm-2", Reason: "high memory", Host: "host-x"},
{VMID: "vm-3", Reason: "high disk", Host: "host-y"},
},
},
{
name: "complex cycle with multiple hops",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{
@@ -130,23 +130,23 @@ func TestCycleDetector_Filter(t *testing.T) {
{SourceCompute: "host-d", DestCompute: "host-b"}, // Cycle to host-b (not host-a)
},
},
- expected: []plugins.Decision{}, // Filtered out due to cycle
+ expected: []plugins.VMDetection{}, // Filtered out due to cycle
},
{
name: "no migrations - decision passes through",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{
"vm-1": {}, // No migrations
},
- expected: []plugins.Decision{
+ expected: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
},
{
name: "single migration - no cycle possible",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{
@@ -154,13 +154,13 @@ func TestCycleDetector_Filter(t *testing.T) {
{SourceCompute: "host-a", DestCompute: "host-b"},
},
},
- expected: []plugins.Decision{
+ expected: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
},
{
name: "API error when getting migrations",
- decisions: []plugins.Decision{
+ decisions: []plugins.VMDetection{
{VMID: "vm-1", Reason: "high CPU", Host: "host-a"},
},
migrations: map[string][]migration{},
@@ -201,33 +201,33 @@ func TestCycleDetector_Filter(t *testing.T) {
}
// Check if all expected decisions are present
- expectedMap := make(map[string]plugins.Decision)
+ expectedMap := make(map[string]plugins.VMDetection)
for _, d := range tt.expected {
expectedMap[d.VMID] = d
}
- for _, resultDecision := range result {
- expectedDecision, found := expectedMap[resultDecision.VMID]
+ for _, resultVMDetection := range result {
+ expectedVMDetection, found := expectedMap[resultVMDetection.VMID]
if !found {
- t.Errorf("unexpected decision for VM %s", resultDecision.VMID)
+ t.Errorf("unexpected decision for VM %s", resultVMDetection.VMID)
continue
}
- if resultDecision.Reason != expectedDecision.Reason {
+ if resultVMDetection.Reason != expectedVMDetection.Reason {
t.Errorf("expected reason %s for VM %s, got %s",
- expectedDecision.Reason, resultDecision.VMID, resultDecision.Reason)
+ expectedVMDetection.Reason, resultVMDetection.VMID, resultVMDetection.Reason)
}
- if resultDecision.Host != expectedDecision.Host {
+ if resultVMDetection.Host != expectedVMDetection.Host {
t.Errorf("expected host %s for VM %s, got %s",
- expectedDecision.Host, resultDecision.VMID, resultDecision.Host)
+ expectedVMDetection.Host, resultVMDetection.VMID, resultVMDetection.Host)
}
}
})
}
}
-func TestCycleDetector_Filter_EmptyDecisions(t *testing.T) {
+func TestCycleDetector_Filter_EmptyVMDetections(t *testing.T) {
mockAPI := &mockCycleDetectorNovaAPI{
migrations: map[string][]migration{},
}
@@ -235,7 +235,7 @@ func TestCycleDetector_Filter_EmptyDecisions(t *testing.T) {
detector := cycleDetector{novaAPI: mockAPI}
ctx := context.Background()
- result, err := detector.Filter(ctx, []plugins.Decision{})
+ result, err := detector.Filter(ctx, []plugins.VMDetection{})
if err != nil {
t.Errorf("unexpected error: %v", err)
diff --git a/internal/scheduling/descheduling/nova/detector.go b/internal/scheduling/descheduling/nova/detector.go
deleted file mode 100644
index f6e0e455c..000000000
--- a/internal/scheduling/descheduling/nova/detector.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package nova
-
-import (
- "context"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-type Detector interface {
- // Get the VMs on their current hosts that should be considered for descheduling.
- Run() ([]plugins.Decision, error)
- // Configure the step with a database and options.
- Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
-}
diff --git a/internal/scheduling/descheduling/nova/detector_test.go b/internal/scheduling/descheduling/nova/detector_test.go
deleted file mode 100644
index 2c60931c4..000000000
--- a/internal/scheduling/descheduling/nova/detector_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package nova
-
-import (
- "testing"
-
- "github.com/cobaltcore-dev/cortex/pkg/conf"
- "github.com/cobaltcore-dev/cortex/pkg/db"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-type MockOptions struct {
- Option1 string `json:"option1"`
- Option2 int `json:"option2"`
-}
-
-func (o MockOptions) Validate() error {
- return nil
-}
-
-type BaseStep struct {
- Options MockOptions
- DB *db.DB
- Client client.Client
-}
-
-func (s *BaseStep) Init(db *db.DB, client client.Client, opts conf.RawOpts) error {
- s.DB = db
- s.Client = client
- // Use the actual unmarshal logic from conf.RawOpts
- if err := opts.Unmarshal(&s.Options); err != nil {
- return err
- }
- return s.Options.Validate()
-}
-
-func TestBaseStep_Init(t *testing.T) {
- opts := conf.NewRawOpts(`{
- "option1": "value1",
- "option2": 2
- }`)
-
- step := &BaseStep{}
- err := step.Init(nil, nil, opts)
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
-
- if step.Options.Option1 != "value1" {
- t.Errorf("expected Option1 to be 'value1', got %s", step.Options.Option1)
- }
-
- if step.Options.Option2 != 2 {
- t.Errorf("expected Option2 to be 2, got %d", step.Options.Option2)
- }
-}
diff --git a/internal/scheduling/descheduling/nova/executor.go b/internal/scheduling/descheduling/nova/executor.go
index aec78a6ee..a0f5579d8 100644
--- a/internal/scheduling/descheduling/nova/executor.go
+++ b/internal/scheduling/descheduling/nova/executor.go
@@ -9,6 +9,8 @@ import (
"time"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"github.com/cobaltcore-dev/cortex/pkg/multicluster"
@@ -35,7 +37,7 @@ type Executor struct {
// Configuration for the descheduler.
Conf conf.Config
// Monitor for tracking the descheduler execution.
- Monitor Monitor
+ Monitor lib.DetectorMonitor[plugins.VMDetection]
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
diff --git a/internal/scheduling/descheduling/nova/executor_test.go b/internal/scheduling/descheduling/nova/executor_test.go
index 744f8a7c8..e9ace2b2a 100644
--- a/internal/scheduling/descheduling/nova/executor_test.go
+++ b/internal/scheduling/descheduling/nova/executor_test.go
@@ -10,6 +10,8 @@ import (
"time"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"k8s.io/apimachinery/pkg/api/meta"
@@ -74,8 +76,8 @@ func (m *mockExecutorNovaAPI) GetServerMigrations(ctx context.Context, id string
}
// Create a zero-value Monitor for testing
-func newMockMonitor() Monitor {
- return Monitor{}
+func newMockMonitor() lib.DetectorMonitor[plugins.VMDetection] {
+ return lib.DetectorMonitor[plugins.VMDetection]{}
}
func TestExecutor_Reconcile(t *testing.T) {
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index f1b575983..6ed8f685d 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -10,6 +10,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"github.com/cobaltcore-dev/cortex/pkg/multicluster"
@@ -30,14 +31,14 @@ import (
// reconfigure the pipelines as needed.
type DeschedulingsPipelineController struct {
// Toolbox shared between all pipeline controllers.
- lib.BasePipelineController[*Pipeline]
+ lib.BasePipelineController[*lib.DetectorPipeline[plugins.VMDetection]]
// Monitor to pass down to all pipelines.
- Monitor Monitor
+ Monitor lib.DetectorPipelineMonitor
// Config for the scheduling operator.
Conf conf.Config
// Cycle detector to avoid descheduling loops.
- CycleDetector CycleDetector
+ CycleDetector lib.CycleDetector[plugins.VMDetection]
}
// The type of pipeline this controller manages.
@@ -49,15 +50,15 @@ func (c *DeschedulingsPipelineController) PipelineType() v1alpha1.PipelineType {
func (c *DeschedulingsPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
-) lib.PipelineInitResult[*Pipeline] {
+) lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]] {
- pipeline := &Pipeline{
+ pipeline := &lib.DetectorPipeline[plugins.VMDetection]{
Client: c.Client,
CycleDetector: c.CycleDetector,
Monitor: c.Monitor.SubPipeline(p.Name),
}
nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
- return lib.PipelineInitResult[*Pipeline]{
+ return lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]]{
Pipeline: pipeline,
NonCriticalErr: nonCriticalErr,
CriticalErr: criticalErr,
@@ -78,9 +79,48 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
time.Sleep(jobloop.DefaultJitter(time.Minute))
continue
}
- if err := p.createDeschedulings(ctx); err != nil {
- slog.Error("descheduler: failed to create deschedulings", "error", err)
+ decisionsByStep := p.Run()
+ if len(decisionsByStep) == 0 {
+ slog.Info("descheduler: no decisions made in this run")
+ time.Sleep(jobloop.DefaultJitter(time.Minute))
+ continue
+ }
+ slog.Info("descheduler: decisions made", "decisionsByStep", decisionsByStep)
+ decisions := p.Combine(decisionsByStep)
+ var err error
+ decisions, err = p.CycleDetector.Filter(ctx, decisions)
+ if err != nil {
+ slog.Error("descheduler: failed to filter decisions for cycles", "error", err)
+ time.Sleep(jobloop.DefaultJitter(time.Minute))
+ continue
}
+ for _, decision := range decisions {
+ // Precaution: If a descheduling for the VM already exists, skip it.
+ // The TTL controller will clean up old deschedulings so the vm
+ // can be descheduled again later if needed, or we can manually
+ // delete the descheduling if we want to deschedule the VM again.
+ var existing v1alpha1.Descheduling
+ err := p.Get(ctx, client.ObjectKey{Name: decision.VMID}, &existing)
+ if err == nil {
+ slog.Info("descheduler: descheduling already exists for VM, skipping", "vmId", decision.VMID)
+ continue
+ }
+
+ descheduling := &v1alpha1.Descheduling{}
+ descheduling.Name = decision.VMID
+ descheduling.Spec.Ref = decision.VMID
+ descheduling.Spec.RefType = v1alpha1.DeschedulingSpecVMReferenceNovaServerUUID
+ descheduling.Spec.PrevHostType = v1alpha1.DeschedulingSpecHostTypeNovaComputeHostName
+ descheduling.Spec.PrevHost = decision.Host
+ descheduling.Spec.Reason = decision.Reason
+ if err := p.Create(ctx, descheduling); err != nil {
+ slog.Error("descheduler: failed to create descheduling", "error", err)
+ time.Sleep(jobloop.DefaultJitter(time.Minute))
+ continue
+ }
+ slog.Info("descheduler: created descheduling", "vmId", decision.VMID, "host", decision.Host, "reason", decision.Reason)
+ }
+
time.Sleep(jobloop.DefaultJitter(time.Minute))
}
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index f48924ac4..3ff81bf44 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -24,13 +24,13 @@ func (m *mockCycleDetector) Init(ctx context.Context, client client.Client, conf
return nil
}
-func (m *mockCycleDetector) Filter(ctx context.Context, decisions []plugins.Decision) ([]plugins.Decision, error) {
+func (m *mockCycleDetector) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
return decisions, nil
}
type mockControllerStep struct{}
-func (m *mockControllerStep) Run() ([]plugins.Decision, error) {
+func (m *mockControllerStep) Run() ([]plugins.VMDetection, error) {
return nil, nil
}
func (m *mockControllerStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
@@ -75,15 +75,15 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
controller := &DeschedulingsPipelineController{
- Monitor: NewPipelineMonitor(),
+ Monitor: lib.NewDetectorPipelineMonitor(),
CycleDetector: &mockCycleDetector{},
}
- pipeline := Pipeline{
+ pipeline := lib.DetectorPipeline[plugins.VMDetection]{
CycleDetector: controller.CycleDetector,
Monitor: controller.Monitor,
}
- nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]Detector{
+ nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]lib.Detector[plugins.VMDetection]{
"mock-step": &mockControllerStep{},
})
@@ -128,7 +128,7 @@ func TestDeschedulingsPipelineController_Reconcile(t *testing.T) {
client := fake.NewClientBuilder().WithScheme(scheme).Build()
controller := &DeschedulingsPipelineController{
- BasePipelineController: lib.BasePipelineController[*Pipeline]{
+ BasePipelineController: lib.BasePipelineController[*lib.DetectorPipeline[plugins.VMDetection]]{
Client: client,
},
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_test.go b/internal/scheduling/descheduling/nova/pipeline_test.go
deleted file mode 100644
index 06058b312..000000000
--- a/internal/scheduling/descheduling/nova/pipeline_test.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package nova
-
-import (
- "context"
- "errors"
- "reflect"
- "testing"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Mock implementations for testing pipeline functionality
-
-type mockPipelineStep struct {
- decisions []plugins.Decision
- runError error
- initError error
- initialized bool
-}
-
-func (m *mockPipelineStep) Run() ([]plugins.Decision, error) {
- if m.runError != nil {
- return nil, m.runError
- }
- return m.decisions, nil
-}
-
-func (m *mockPipelineStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
- if m.initError != nil {
- return m.initError
- }
- m.initialized = true
- return nil
-}
-
-func TestPipeline_Init(t *testing.T) {
- tests := []struct {
- name string
- supportedSteps map[string]Detector
- confedSteps []v1alpha1.DetectorSpec
- expectedNonCriticalError bool
- expectedCriticalError bool
- }{
- {
- name: "successful initialization with single step",
- supportedSteps: map[string]Detector{
- "test-step": &mockPipelineStep{},
- },
- confedSteps: []v1alpha1.DetectorSpec{{
- Name: "test-step",
- }},
- expectedNonCriticalError: false,
- expectedCriticalError: false,
- },
- {
- name: "initialization with unsupported step",
- supportedSteps: map[string]Detector{
- "test-step": &mockPipelineStep{},
- },
- confedSteps: []v1alpha1.DetectorSpec{{
- Name: "unsupported-step",
- }},
- expectedNonCriticalError: true,
- expectedCriticalError: false,
- },
- {
- name: "initialization with step init error",
- supportedSteps: map[string]Detector{
- "failing-step": &mockPipelineStep{initError: errors.New("init failed")},
- },
- confedSteps: []v1alpha1.DetectorSpec{{
- Name: "failing-step",
- }},
- expectedNonCriticalError: true,
- expectedCriticalError: false,
- },
- {
- name: "initialization with multiple steps",
- supportedSteps: map[string]Detector{
- "step1": &mockPipelineStep{},
- "step2": &mockPipelineStep{},
- },
- confedSteps: []v1alpha1.DetectorSpec{
- {
- Name: "step1",
- },
- {
- Name: "step2",
- },
- },
- expectedNonCriticalError: false,
- expectedCriticalError: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- pipeline := &Pipeline{}
-
- nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.confedSteps, tt.supportedSteps)
- if tt.expectedCriticalError {
- if criticalErr == nil {
- t.Fatalf("expected critical error during initialization, got none")
- }
- return
- }
- if criticalErr != nil {
- t.Fatalf("Failed to initialize pipeline: %v", criticalErr)
- }
-
- if nonCriticalErr != nil && !tt.expectedNonCriticalError {
- t.Errorf("unexpected non-critical error during initialization: %v", nonCriticalErr)
- } else if nonCriticalErr == nil && tt.expectedNonCriticalError {
- t.Errorf("expected non-critical error during initialization, got none")
- }
- // Verify that successfully initialized steps are actually initialized
- for _, step := range pipeline.steps {
- if stepMonitor, ok := step.(StepMonitor); ok {
- if mockStep, ok := stepMonitor.step.(*mockPipelineStep); ok {
- if !mockStep.initialized {
- t.Error("step was not properly initialized")
- }
- }
- }
- }
- })
- }
-}
-
-func TestPipeline_run(t *testing.T) {
- tests := []struct {
- name string
- steps map[string]Detector
- order []string
- expectedResults map[string][]plugins.Decision
- }{
- {
- name: "successful run with single step",
- steps: map[string]Detector{
- "test-step": &mockPipelineStep{
- decisions: []plugins.Decision{
- {VMID: "vm1", Reason: "test reason", Host: "host1"},
- },
- },
- },
- order: []string{"test-step"},
- expectedResults: map[string][]plugins.Decision{
- "test-step": {
- {VMID: "vm1", Reason: "test reason", Host: "host1"},
- },
- },
- },
- {
- name: "run with step error",
- steps: map[string]Detector{
- "failing-step": &mockPipelineStep{
- runError: errors.New("step failed"),
- },
- },
- order: []string{"failing-step"},
- expectedResults: map[string][]plugins.Decision{},
- },
- {
- name: "run with step skipped",
- steps: map[string]Detector{
- "skipped-step": &mockPipelineStep{
- runError: lib.ErrStepSkipped,
- },
- },
- order: []string{"skipped-step"},
- expectedResults: map[string][]plugins.Decision{},
- },
- {
- name: "run with multiple steps",
- steps: map[string]Detector{
- "step1": &mockPipelineStep{
- decisions: []plugins.Decision{
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- },
- },
- "step2": &mockPipelineStep{
- decisions: []plugins.Decision{
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- },
- },
- order: []string{"step1", "step2"},
- expectedResults: map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- },
- "step2": {
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- pipeline := &Pipeline{
- steps: tt.steps,
- order: tt.order,
- }
-
- results := pipeline.run()
-
- if !reflect.DeepEqual(results, tt.expectedResults) {
- t.Errorf("expected results %v, got %v", tt.expectedResults, results)
- }
- })
- }
-}
-
-func TestPipeline_combine(t *testing.T) {
- tests := []struct {
- name string
- decisionsByStep map[string][]plugins.Decision
- expectedDecisions []plugins.Decision
- }{
- {
- name: "single decision per VM",
- decisionsByStep: map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- },
- expectedDecisions: []plugins.Decision{
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- },
- {
- name: "multiple decisions for same VM with same host",
- decisionsByStep: map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- },
- "step2": {
- {VMID: "vm1", Reason: "reason2", Host: "host1"},
- },
- },
- expectedDecisions: []plugins.Decision{
- {VMID: "vm1", Reason: "multiple reasons: reason1; reason2", Host: "host1"},
- },
- },
- {
- name: "multiple decisions for same VM with different hosts",
- decisionsByStep: map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- },
- "step2": {
- {VMID: "vm1", Reason: "reason2", Host: "host2"},
- },
- },
- expectedDecisions: []plugins.Decision{}, // Should be skipped due to conflicting hosts
- },
- {
- name: "mixed scenario",
- decisionsByStep: map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- "step2": {
- {VMID: "vm1", Reason: "reason3", Host: "host1"},
- {VMID: "vm3", Reason: "reason4", Host: "host3"},
- },
- },
- expectedDecisions: []plugins.Decision{
- {VMID: "vm1", Reason: "multiple reasons: reason1; reason3", Host: "host1"},
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- {VMID: "vm3", Reason: "reason4", Host: "host3"},
- },
- },
- {
- name: "empty input",
- decisionsByStep: map[string][]plugins.Decision{},
- expectedDecisions: []plugins.Decision{},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- pipeline := &Pipeline{}
- results := pipeline.combine(tt.decisionsByStep)
-
- // Sort results for consistent comparison
- if len(results) != len(tt.expectedDecisions) {
- t.Errorf("expected %d decisions, got %d", len(tt.expectedDecisions), len(results))
- return
- }
-
- // Create maps for easier comparison (order doesn't matter)
- expectedMap := make(map[string]plugins.Decision)
- for _, d := range tt.expectedDecisions {
- expectedMap[d.VMID] = d
- }
-
- resultMap := make(map[string]plugins.Decision)
- for _, d := range results {
- resultMap[d.VMID] = d
- }
-
- if !reflect.DeepEqual(expectedMap, resultMap) {
- t.Errorf("expected decisions %v, got %v", tt.expectedDecisions, results)
- }
- })
- }
-}
-
-func TestSupportedSteps(t *testing.T) {
- // Test that SupportedSteps is properly initialized
- if len(supportedDetectors) == 0 {
- t.Error("SupportedSteps should not be empty")
- }
-}
-
-// Benchmark tests
-func BenchmarkPipeline_run(b *testing.B) {
- steps := map[string]Detector{
- "step1": &mockPipelineStep{
- decisions: []plugins.Decision{
- {VMID: "vm1", Reason: "bench reason", Host: "host1"},
- },
- },
- "step2": &mockPipelineStep{
- decisions: []plugins.Decision{
- {VMID: "vm2", Reason: "bench reason", Host: "host2"},
- },
- },
- }
-
- pipeline := &Pipeline{
- steps: steps,
- order: []string{"step1", "step2"},
- }
-
- b.ResetTimer()
- for range b.N {
- pipeline.run()
- }
-}
-
-func BenchmarkPipeline_combine(b *testing.B) {
- decisionsByStep := map[string][]plugins.Decision{
- "step1": {
- {VMID: "vm1", Reason: "reason1", Host: "host1"},
- {VMID: "vm2", Reason: "reason2", Host: "host2"},
- },
- "step2": {
- {VMID: "vm1", Reason: "reason3", Host: "host1"},
- {VMID: "vm3", Reason: "reason4", Host: "host3"},
- },
- }
-
- pipeline := &Pipeline{}
-
- b.ResetTimer()
- for range b.N {
- pipeline.combine(decisionsByStep)
- }
-}
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
index 7e0a9d7e6..022c610f6 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
@@ -11,6 +11,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -22,12 +23,12 @@ type AvoidHighStealPctStepOpts struct {
type AvoidHighStealPctStep struct {
// Detector is a helper struct that provides common functionality for all descheduler steps.
- plugins.Detector[AvoidHighStealPctStepOpts]
+ lib.BaseDetector[AvoidHighStealPctStepOpts]
}
// Initialize the step and validate that all required knowledges are ready.
func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
- if err := s.Detector.Init(ctx, client, step); err != nil {
+ if err := s.BaseDetector.Init(ctx, client, step); err != nil {
return err
}
if err := s.CheckKnowledges(ctx, corev1.ObjectReference{Name: "kvm-libvirt-domain-cpu-steal-pct"}); err != nil {
@@ -36,7 +37,7 @@ func (s *AvoidHighStealPctStep) Init(ctx context.Context, client client.Client,
return nil
}
-func (s *AvoidHighStealPctStep) Run() ([]plugins.Decision, error) {
+func (s *AvoidHighStealPctStep) Run() ([]plugins.VMDetection, error) {
if s.Options.MaxStealPctOverObservedTimeSpan <= 0 {
slog.Info("skipping step because maxStealPctOverObservedTimeSpan is not set or <= 0")
return nil, nil
@@ -55,10 +56,10 @@ func (s *AvoidHighStealPctStep) Run() ([]plugins.Decision, error) {
if err != nil {
return nil, err
}
- var decisions []plugins.Decision
+ var decisions []plugins.VMDetection
for _, f := range features {
if f.MaxStealTimePct > s.Options.MaxStealPctOverObservedTimeSpan {
- decisions = append(decisions, plugins.Decision{
+ decisions = append(decisions, plugins.VMDetection{
VMID: f.InstanceUUID,
Reason: fmt.Sprintf("kvm monitoring indicates cpu steal pct %.2f%% which is above %.2f%% threshold", f.MaxStealTimePct, s.Options.MaxStealPctOverObservedTimeSpan),
Host: f.Host,
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go
index 59e1a5f0b..6da3ab2dc 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go
+++ b/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go
@@ -12,8 +12,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-// Decision represents a descheduling decision for testing
-type Decision struct {
+// VMDetection represents a descheduling decision for testing
+type VMDetection struct {
VMID string
Reason string
Host string
@@ -26,12 +26,12 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
}
tests := []struct {
- name string
- threshold float64
- features []compute.LibvirtDomainCPUStealPct
- expectedDecisions int
- expectedVMs []string
- expectSkip bool
+ name string
+ threshold float64
+ features []compute.LibvirtDomainCPUStealPct
+ expectedVMDetections int
+ expectedVMs []string
+ expectSkip bool
}{
{
name: "skip when threshold is zero",
@@ -46,9 +46,9 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
expectSkip: true,
},
{
- name: "no VMs above threshold",
- threshold: 80.0,
- expectedDecisions: 0,
+ name: "no VMs above threshold",
+ threshold: 80.0,
+ expectedVMDetections: 0,
features: []compute.LibvirtDomainCPUStealPct{
{InstanceUUID: "vm-1", Host: "host1", MaxStealTimePct: 50.0},
{InstanceUUID: "vm-2", Host: "host2", MaxStealTimePct: 75.0},
@@ -56,10 +56,10 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
},
},
{
- name: "some VMs above threshold",
- threshold: 70.0,
- expectedDecisions: 2,
- expectedVMs: []string{"vm-2", "vm-4"},
+ name: "some VMs above threshold",
+ threshold: 70.0,
+ expectedVMDetections: 2,
+ expectedVMs: []string{"vm-2", "vm-4"},
features: []compute.LibvirtDomainCPUStealPct{
{InstanceUUID: "vm-1", Host: "host1", MaxStealTimePct: 50.0},
{InstanceUUID: "vm-2", Host: "host2", MaxStealTimePct: 75.0},
@@ -68,10 +68,10 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
},
},
{
- name: "all VMs above threshold",
- threshold: 40.0,
- expectedDecisions: 3,
- expectedVMs: []string{"vm-1", "vm-2", "vm-3"},
+ name: "all VMs above threshold",
+ threshold: 40.0,
+ expectedVMDetections: 3,
+ expectedVMs: []string{"vm-1", "vm-2", "vm-3"},
features: []compute.LibvirtDomainCPUStealPct{
{InstanceUUID: "vm-1", Host: "host1", MaxStealTimePct: 50.0},
{InstanceUUID: "vm-2", Host: "host2", MaxStealTimePct: 75.0},
@@ -79,10 +79,10 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
},
},
{
- name: "VM exactly at threshold (should not be selected)",
- threshold: 75.0,
- expectedDecisions: 1,
- expectedVMs: []string{"vm-3"},
+ name: "VM exactly at threshold (should not be selected)",
+ threshold: 75.0,
+ expectedVMDetections: 1,
+ expectedVMs: []string{"vm-3"},
features: []compute.LibvirtDomainCPUStealPct{
{InstanceUUID: "vm-1", Host: "host1", MaxStealTimePct: 50.0},
{InstanceUUID: "vm-2", Host: "host2", MaxStealTimePct: 75.0}, // exactly at threshold
@@ -90,16 +90,16 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
},
},
{
- name: "empty database",
- threshold: 50.0,
- expectedDecisions: 0,
- features: []compute.LibvirtDomainCPUStealPct{},
+ name: "empty database",
+ threshold: 50.0,
+ expectedVMDetections: 0,
+ features: []compute.LibvirtDomainCPUStealPct{},
},
{
- name: "high precision values",
- threshold: 75.555,
- expectedDecisions: 1,
- expectedVMs: []string{"vm-2"},
+ name: "high precision values",
+ threshold: 75.555,
+ expectedVMDetections: 1,
+ expectedVMs: []string{"vm-2"},
features: []compute.LibvirtDomainCPUStealPct{
{InstanceUUID: "vm-1", Host: "host1", MaxStealTimePct: 75.554},
{InstanceUUID: "vm-2", Host: "host2", MaxStealTimePct: 75.556},
@@ -138,8 +138,8 @@ func TestAvoidHighStealPctStep_Run(t *testing.T) {
}
// Check number of decisions
- if len(decisions) != tt.expectedDecisions {
- t.Errorf("expected %d decisions, got %d", tt.expectedDecisions, len(decisions))
+ if len(decisions) != tt.expectedVMDetections {
+ t.Errorf("expected %d decisions, got %d", tt.expectedVMDetections, len(decisions))
}
// Check that the correct VMs were selected
diff --git a/internal/scheduling/descheduling/nova/plugins/vm_detection.go b/internal/scheduling/descheduling/nova/plugins/vm_detection.go
new file mode 100644
index 000000000..777dbe642
--- /dev/null
+++ b/internal/scheduling/descheduling/nova/plugins/vm_detection.go
@@ -0,0 +1,20 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package plugins
+
+import "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+
+type VMDetection struct {
+	// The VM ID for which this detection applies.
+	VMID string
+	// A human-readable reason for this detection.
+	Reason string
+	// The compute host where the VM should be migrated away from.
+ Host string
+}
+
+func (d VMDetection) GetResource() string { return d.VMID }
+func (d VMDetection) GetReason() string { return d.Reason }
+func (d VMDetection) GetHost() string { return d.Host }
+func (d VMDetection) WithReason(reason string) lib.Detection { d.Reason = reason; return d }
diff --git a/internal/scheduling/descheduling/nova/supported_steps.go b/internal/scheduling/descheduling/nova/supported_steps.go
index f56562f50..3d093eb77 100644
--- a/internal/scheduling/descheduling/nova/supported_steps.go
+++ b/internal/scheduling/descheduling/nova/supported_steps.go
@@ -3,10 +3,14 @@
package nova
-import "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins/kvm"
+import (
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins/kvm"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
// Configuration of steps supported by the descheduler.
// The steps actually used by the scheduler are defined through the configuration file.
-var supportedDetectors = map[string]Detector{
+var supportedDetectors = map[string]lib.Detector[plugins.VMDetection]{
"avoid_high_steal_pct": &kvm.AvoidHighStealPctStep{},
}
diff --git a/internal/scheduling/lib/base_filter.go b/internal/scheduling/lib/base_filter.go
deleted file mode 100644
index e774eaa95..000000000
--- a/internal/scheduling/lib/base_filter.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Common base for all steps that provides some functionality
-// that would otherwise be duplicated across all steps.
-type BaseFilter[RequestType PipelineRequest, Opts StepOpts] struct {
- BaseStep[RequestType, Opts]
-}
-
-// Init the filter with the database and options.
-func (s *BaseFilter[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
- return s.BaseStep.Init(ctx, client, step.Params)
-}
diff --git a/internal/scheduling/lib/base_step.go b/internal/scheduling/lib/base_step.go
deleted file mode 100644
index 0fd20a42b..000000000
--- a/internal/scheduling/lib/base_step.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
-
- "github.com/cobaltcore-dev/cortex/pkg/conf"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Common base for all steps that provides some functionality
-// that would otherwise be duplicated across all steps.
-type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct {
- // Options to pass via yaml to this step.
- conf.JsonOpts[Opts]
- // The activation function to use.
- ActivationFunction
- // The kubernetes client to use.
- Client client.Client
-}
-
-// Init the step with the database and options.
-func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, params runtime.RawExtension) error {
- opts := conf.NewRawOptsBytes(params.Raw)
- if err := s.Load(opts); err != nil {
- return err
- }
- if err := s.Options.Validate(); err != nil {
- return err
- }
-
- s.Client = client
- return nil
-}
-
-// Get a default result (no action) for the input weight keys given in the request.
-// Use this to initialize the result before applying filtering/weighing logic.
-func (s *BaseStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *StepResult {
- activations := make(map[string]float64)
- for _, subject := range request.GetSubjects() {
- activations[subject] = s.NoEffect()
- }
- stats := make(map[string]StepStatistics)
- return &StepResult{Activations: activations, Statistics: stats}
-}
-
-// Get default statistics for the input weight keys given in the request.
-func (s *BaseStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) StepStatistics {
- return StepStatistics{
- Unit: unit,
- Subjects: make(map[string]float64, len(request.GetSubjects())),
- }
-}
diff --git a/internal/scheduling/lib/base_weigher.go b/internal/scheduling/lib/base_weigher.go
deleted file mode 100644
index ac1fac034..000000000
--- a/internal/scheduling/lib/base_weigher.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/meta"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Common base for all steps that provides some functionality
-// that would otherwise be duplicated across all steps.
-type BaseWeigher[RequestType PipelineRequest, Opts StepOpts] struct {
- BaseStep[RequestType, Opts]
-}
-
-// Init the weigher with the database and options.
-func (s *BaseWeigher[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
- return s.BaseStep.Init(ctx, client, step.Params)
-}
-
-// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (d *BaseStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
- if d.Client == nil {
- return errors.New("kubernetes client not initialized")
- }
- for _, objRef := range kns {
- knowledge := &v1alpha1.Knowledge{}
- if err := d.Client.Get(ctx, client.ObjectKey{
- Name: objRef.Name,
- Namespace: objRef.Namespace,
- }, knowledge); err != nil {
- return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
- }
- // Check if the knowledge status conditions indicate an error.
- if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
- return fmt.Errorf("knowledge %s not ready", objRef.Name)
- }
- if knowledge.Status.RawLength == 0 {
- return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
- }
- }
- return nil
-}
diff --git a/internal/scheduling/lib/cycle_detector.go b/internal/scheduling/lib/cycle_detector.go
new file mode 100644
index 000000000..9c9796652
--- /dev/null
+++ b/internal/scheduling/lib/cycle_detector.go
@@ -0,0 +1,18 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+
+ "github.com/cobaltcore-dev/cortex/pkg/conf"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type CycleDetector[DetectionType Detection] interface {
+ // Initialize the cycle detector with needed clients.
+ Init(ctx context.Context, client client.Client, conf conf.Config) error
+ // Filter descheduling decisions to avoid cycles.
+ Filter(ctx context.Context, decisions []DetectionType) ([]DetectionType, error)
+}
diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/lib/detector.go
similarity index 63%
rename from internal/scheduling/descheduling/nova/plugins/base.go
rename to internal/scheduling/lib/detector.go
index 12ac275f2..752c637cd 100644
--- a/internal/scheduling/descheduling/nova/plugins/base.go
+++ b/internal/scheduling/lib/detector.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package plugins
+package lib
import (
"context"
@@ -15,9 +15,28 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
+type Detection interface {
+	// GetResource returns the ID of the detected resource.
+	GetResource() string
+	// GetHost returns the host on which this resource is currently located.
+	GetHost() string
+	// GetReason returns the reason for the detection.
+	GetReason() string
+	// WithReason returns a copy of the detection with the given reason set.
+	WithReason(reason string) Detection
+}
+
+type Detector[DetectionType Detection] interface {
+ // Detect resources such as VMs on their current hosts that should be
+ // considered for descheduling.
+ Run() ([]DetectionType, error)
+ // Configure the step with a database and options.
+ Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error
+}
+
// Common base for all descheduler steps that provides some functionality
// that would otherwise be duplicated across all steps.
-type Detector[Opts any] struct {
+type BaseDetector[Opts any] struct {
// Options to pass via yaml to this step.
conf.JsonOpts[Opts]
// The kubernetes client to use.
@@ -25,7 +44,7 @@ type Detector[Opts any] struct {
}
// Init the step with the database and options.
-func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (d *BaseDetector[Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
d.Client = client
opts := conf.NewRawOptsBytes(step.Params.Raw)
@@ -36,7 +55,7 @@ func (d *Detector[Opts]) Init(ctx context.Context, client client.Client, step v1
}
// Check if all knowledges are ready, and if not, return an error indicating why not.
-func (d *Detector[PipelineType]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+func (d *BaseDetector[Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
if d.Client == nil {
return errors.New("kubernetes client not initialized")
}
@@ -58,12 +77,3 @@ func (d *Detector[PipelineType]) CheckKnowledges(ctx context.Context, kns ...cor
}
return nil
}
-
-type Decision struct {
- // Get the VM ID for which this decision applies.
- VMID string
- // Get a human-readable reason for this decision.
- Reason string
- // Get the compute host where the vm should be migrated away from.
- Host string
-}
diff --git a/internal/scheduling/descheduling/nova/monitor.go b/internal/scheduling/lib/detector_monitor.go
similarity index 76%
rename from internal/scheduling/descheduling/nova/monitor.go
rename to internal/scheduling/lib/detector_monitor.go
index ae5daf87b..89e5a01a7 100644
--- a/internal/scheduling/descheduling/nova/monitor.go
+++ b/internal/scheduling/lib/detector_monitor.go
@@ -1,18 +1,17 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package nova
+package lib
import (
"context"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type Monitor struct {
+type DetectorPipelineMonitor struct {
// A histogram to measure how long each step takes to run.
stepRunTimer *prometheus.HistogramVec
// A counter to measure how many vm ids are selected for descheduling by each step.
@@ -26,8 +25,8 @@ type Monitor struct {
PipelineName string
}
-func NewPipelineMonitor() Monitor {
- return Monitor{
+func NewDetectorPipelineMonitor() DetectorPipelineMonitor {
+ return DetectorPipelineMonitor{
stepRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "cortex_descheduler_pipeline_step_run_duration_seconds",
Help: "Duration of descheduler pipeline step run",
@@ -51,29 +50,29 @@ func NewPipelineMonitor() Monitor {
}
// Get a copied pipeline monitor with the name set, after binding the metrics.
-func (m Monitor) SubPipeline(name string) Monitor {
+func (m DetectorPipelineMonitor) SubPipeline(name string) DetectorPipelineMonitor {
cp := m
cp.PipelineName = name
return cp
}
-func (m *Monitor) Describe(ch chan<- *prometheus.Desc) {
+func (m *DetectorPipelineMonitor) Describe(ch chan<- *prometheus.Desc) {
m.stepRunTimer.Describe(ch)
m.stepDeschedulingCounter.Describe(ch)
m.pipelineRunTimer.Describe(ch)
m.deschedulingRunTimer.Describe(ch)
}
-func (m *Monitor) Collect(ch chan<- prometheus.Metric) {
+func (m *DetectorPipelineMonitor) Collect(ch chan<- prometheus.Metric) {
m.stepRunTimer.Collect(ch)
m.stepDeschedulingCounter.Collect(ch)
m.pipelineRunTimer.Collect(ch)
m.deschedulingRunTimer.Collect(ch)
}
-type StepMonitor struct {
+type DetectorMonitor[DetectionType Detection] struct {
// The step being monitored.
- step Detector
+ step Detector[DetectionType]
// The name of this step.
stepName string
// A timer to measure how long the step takes to run.
@@ -82,8 +81,13 @@ type StepMonitor struct {
descheduledCounter prometheus.Counter
}
-// Monitor a descheduler step by wrapping it with a StepMonitor.
-func monitorStep(step Detector, conf v1alpha1.DetectorSpec, monitor Monitor) StepMonitor {
+// Monitor a descheduler step by wrapping it with a DetectorMonitor.
+func monitorDetector[DetectionType Detection](
+ step Detector[DetectionType],
+ conf v1alpha1.DetectorSpec,
+ monitor DetectorPipelineMonitor,
+) DetectorMonitor[DetectionType] {
+
var runTimer prometheus.Observer
if monitor.stepRunTimer != nil {
runTimer = monitor.stepRunTimer.WithLabelValues(conf.Name)
@@ -92,7 +96,7 @@ func monitorStep(step Detector, conf v1alpha1.DetectorSpec, monitor Monitor) Ste
if monitor.stepDeschedulingCounter != nil {
descheduledCounter = monitor.stepDeschedulingCounter.WithLabelValues(conf.Name)
}
- return StepMonitor{
+ return DetectorMonitor[DetectionType]{
step: step,
stepName: conf.Name,
runTimer: runTimer,
@@ -101,22 +105,25 @@ func monitorStep(step Detector, conf v1alpha1.DetectorSpec, monitor Monitor) Ste
}
// Initialize the step with the database and options.
-func (m StepMonitor) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+func (m DetectorMonitor[DetectionType]) Init(
+ ctx context.Context, client client.Client, step v1alpha1.DetectorSpec,
+) error {
+
return m.step.Init(ctx, client, step)
}
// Run the step and measure its execution time.
-func (m StepMonitor) Run() ([]plugins.Decision, error) {
+func (m DetectorMonitor[DetectionType]) Run() ([]DetectionType, error) {
if m.runTimer != nil {
timer := prometheus.NewTimer(m.runTimer)
defer timer.ObserveDuration()
}
- vmsToDeschedule, err := m.step.Run()
+ detections, err := m.step.Run()
if err != nil {
return nil, err
}
if m.descheduledCounter != nil {
- m.descheduledCounter.Add(float64(len(vmsToDeschedule)))
+ m.descheduledCounter.Add(float64(len(detections)))
}
- return vmsToDeschedule, nil
+ return detections, nil
}
diff --git a/internal/scheduling/descheduling/nova/monitor_test.go b/internal/scheduling/lib/detector_monitor_test.go
similarity index 80%
rename from internal/scheduling/descheduling/nova/monitor_test.go
rename to internal/scheduling/lib/detector_monitor_test.go
index ed7416848..12b987b23 100644
--- a/internal/scheduling/descheduling/nova/monitor_test.go
+++ b/internal/scheduling/lib/detector_monitor_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package nova
+package lib
import (
"context"
@@ -9,15 +9,14 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-func TestNewPipelineMonitor(t *testing.T) {
- monitor := NewPipelineMonitor()
+func TestNewDetectorPipelineMonitor(t *testing.T) {
+ monitor := NewDetectorPipelineMonitor()
if monitor.stepRunTimer == nil {
t.Error("expected stepRunTimer to be initialized")
@@ -34,7 +33,7 @@ func TestNewPipelineMonitor(t *testing.T) {
}
func TestMonitor_Describe(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
descs := make(chan *prometheus.Desc, 10)
go func() {
@@ -53,7 +52,7 @@ func TestMonitor_Describe(t *testing.T) {
}
func TestMonitor_Collect(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
metrics := make(chan prometheus.Metric, 10)
go func() {
@@ -73,7 +72,7 @@ func TestMonitor_Collect(t *testing.T) {
}
type mockMonitorStep struct {
- decisions []plugins.Decision
+ decisions []mockDetection
initError error
runError error
initCalled bool
@@ -85,21 +84,21 @@ func (m *mockMonitorStep) Init(ctx context.Context, client client.Client, step v
return m.initError
}
-func (m *mockMonitorStep) Run() ([]plugins.Decision, error) {
+func (m *mockMonitorStep) Run() ([]mockDetection, error) {
m.runCalled = true
return m.decisions, m.runError
}
func TestMonitorStep(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
step := &mockMonitorStep{
- decisions: []plugins.Decision{
- {VMID: "vm1", Reason: "test"},
+ decisions: []mockDetection{
+ {resource: "vm1", reason: "test"},
},
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
if monitoredStep.step != step {
t.Error("expected wrapped step to be preserved")
@@ -115,11 +114,11 @@ func TestMonitorStep(t *testing.T) {
}
func TestStepMonitor_Init(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
step := &mockMonitorStep{}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
client := fake.NewClientBuilder().Build()
err := monitoredStep.Init(context.Background(), client, conf)
@@ -134,13 +133,13 @@ func TestStepMonitor_Init(t *testing.T) {
}
func TestStepMonitor_Init_WithError(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
expectedErr := errors.New("init failed")
step := &mockMonitorStep{
initError: expectedErr,
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
client := fake.NewClientBuilder().Build()
err := monitoredStep.Init(context.Background(), client, conf)
@@ -151,16 +150,16 @@ func TestStepMonitor_Init_WithError(t *testing.T) {
}
func TestStepMonitor_Run(t *testing.T) {
- monitor := NewPipelineMonitor()
- decisions := []plugins.Decision{
- {VMID: "vm1", Reason: "test1"},
- {VMID: "vm2", Reason: "test2"},
+ monitor := NewDetectorPipelineMonitor()
+ decisions := []mockDetection{
+ {resource: "vm1", reason: "test1"},
+ {resource: "vm2", reason: "test2"},
}
step := &mockMonitorStep{
decisions: decisions,
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -184,13 +183,13 @@ func TestStepMonitor_Run(t *testing.T) {
}
func TestStepMonitor_Run_WithError(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
expectedErr := errors.New("run failed")
step := &mockMonitorStep{
runError: expectedErr,
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -210,12 +209,12 @@ func TestStepMonitor_Run_WithError(t *testing.T) {
}
func TestStepMonitor_Run_EmptyResult(t *testing.T) {
- monitor := NewPipelineMonitor()
+ monitor := NewDetectorPipelineMonitor()
step := &mockMonitorStep{
- decisions: []plugins.Decision{}, // Empty slice
+ decisions: []mockDetection{}, // Empty slice
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
result, err := monitoredStep.Run()
@@ -236,14 +235,14 @@ func TestStepMonitor_Run_EmptyResult(t *testing.T) {
func TestMonitorStep_WithNilMonitor(t *testing.T) {
// Test with empty monitor (nil fields)
- monitor := Monitor{}
+ monitor := DetectorPipelineMonitor{}
step := &mockMonitorStep{
- decisions: []plugins.Decision{
- {VMID: "vm1", Reason: "test"},
+ decisions: []mockDetection{
+ {resource: "vm1", reason: "test"},
},
}
conf := v1alpha1.DetectorSpec{Name: "test-step"}
- monitoredStep := monitorStep(step, conf, monitor)
+ monitoredStep := monitorDetector(step, conf, monitor)
// Should not panic with nil timers/counters
result, err := monitoredStep.Run()
diff --git a/internal/scheduling/descheduling/nova/pipeline.go b/internal/scheduling/lib/detector_pipeline.go
similarity index 50%
rename from internal/scheduling/descheduling/nova/pipeline.go
rename to internal/scheduling/lib/detector_pipeline.go
index 60e1d8c93..61b46f04d 100644
--- a/internal/scheduling/descheduling/nova/pipeline.go
+++ b/internal/scheduling/lib/detector_pipeline.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package nova
+package lib
import (
"context"
@@ -12,42 +12,40 @@ import (
"sync"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type Pipeline struct {
+type DetectorPipeline[DetectionType Detection] struct {
// Kubernetes client to create descheduling resources.
client.Client
// Cycle detector to avoid cycles in descheduling.
- CycleDetector CycleDetector
+ CycleDetector CycleDetector[DetectionType]
// Monitor to use for tracking the pipeline.
- Monitor Monitor
+ Monitor DetectorPipelineMonitor
// The order in which scheduler steps are applied, by their step name.
order []string
// The steps by their name.
- steps map[string]Detector
+ steps map[string]Detector[DetectionType]
}
-func (p *Pipeline) Init(
+func (p *DetectorPipeline[DetectionType]) Init(
ctx context.Context,
confedSteps []v1alpha1.DetectorSpec,
- supportedSteps map[string]Detector,
+ supportedSteps map[string]Detector[DetectionType],
) (nonCriticalErr, criticalErr error) {
p.order = []string{}
// Load all steps from the configuration.
- p.steps = make(map[string]Detector, len(confedSteps))
+ p.steps = make(map[string]Detector[DetectionType], len(confedSteps))
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
nonCriticalErr = errors.New("descheduler: unsupported step name: " + stepConf.Name)
continue // Descheduler steps are optional.
}
- step = monitorStep(step, stepConf, p.Monitor)
+ step = monitorDetector(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
nonCriticalErr = errors.New("descheduler: failed to initialize step " + stepConf.Name + ": " + err.Error())
continue // Descheduler steps are optional.
@@ -61,19 +59,19 @@ func (p *Pipeline) Init(
// Execute the descheduler steps in parallel and collect the decisions made by
// each step.
-func (p *Pipeline) run() map[string][]plugins.Decision {
+func (p *DetectorPipeline[DetectionType]) Run() map[string][]DetectionType {
if p.Monitor.pipelineRunTimer != nil {
timer := prometheus.NewTimer(p.Monitor.pipelineRunTimer)
defer timer.ObserveDuration()
}
var lock sync.Mutex
- decisionsByStep := map[string][]plugins.Decision{}
+ decisionsByStep := map[string][]DetectionType{}
var wg sync.WaitGroup
for stepName, step := range p.steps {
wg.Go(func() {
slog.Info("descheduler: running step")
decisions, err := step.Run()
- if errors.Is(err, lib.ErrStepSkipped) {
+ if errors.Is(err, ErrStepSkipped) {
slog.Info("descheduler: step skipped")
return
}
@@ -91,40 +89,45 @@ func (p *Pipeline) run() map[string][]plugins.Decision {
return decisionsByStep
}
-// Combine the decisions made by each step into a single list of vms to deschedule.
-func (p *Pipeline) combine(decisionsByStep map[string][]plugins.Decision) []plugins.Decision {
+// Combine the decisions made by each step into a single list of resources to deschedule.
+func (p *DetectorPipeline[DetectionType]) Combine(decisionsByStep map[string][]DetectionType) []DetectionType {
// Order the step names to have a consistent order of processing.
stepNames := make([]string, 0, len(decisionsByStep))
for stepName := range decisionsByStep {
stepNames = append(stepNames, stepName)
}
slices.Sort(stepNames)
- // If there are more than one decision for the same vm, we need to combine them.
- decisionsByVMID := make(map[string][]plugins.Decision)
+ // If there are more than one decision for the same resource, we need to combine them.
+ decisionsByResource := make(map[string][]DetectionType)
for _, stepName := range stepNames {
decisions := decisionsByStep[stepName]
for _, decision := range decisions {
- decisionsByVMID[decision.VMID] = append(decisionsByVMID[decision.VMID], decision)
+ decisionsByResource[decision.GetResource()] = append(
+ decisionsByResource[decision.GetResource()], decision,
+ )
}
}
- combinedDecisions := make([]plugins.Decision, 0, len(decisionsByVMID))
- for vmID, decisions := range decisionsByVMID {
+ combinedDecisions := make([]DetectionType, 0, len(decisionsByResource))
+ for resource, decisions := range decisionsByResource {
+ if len(decisions) == 0 {
+ continue
+ }
if len(decisions) == 1 {
combinedDecisions = append(combinedDecisions, decisions[0])
continue
}
- // If the host is not the same in all decisions, we need to skip this vm.
- host := decisions[0].Host
+ // All hosts should be the same for the same resource.
+ host := decisions[0].GetHost()
sameHost := true
for _, decision := range decisions[1:] {
- if decision.Host != host {
+ if decision.GetHost() != host {
sameHost = false
break
}
}
if !sameHost {
- slog.Error("descheduler: skipping vm with conflicting origin hosts", "vmId", vmID, "decisions", decisions)
+ slog.Error("descheduler: conflicting hosts for combined decisions", "resource", resource, "decisions", decisions)
continue
}
var reasonBuilder strings.Builder
@@ -133,56 +136,13 @@ func (p *Pipeline) combine(decisionsByStep map[string][]plugins.Decision) []plug
if i > 0 {
reasonBuilder.WriteString("; ")
}
- reasonBuilder.WriteString(decision.Reason)
+ reasonBuilder.WriteString(decision.GetReason())
}
- combinedDecisions = append(combinedDecisions, plugins.Decision{
- VMID: vmID,
- Reason: reasonBuilder.String(),
- Host: host,
- })
+ mergedDecision := decisions[0]
+ mergedDecision = mergedDecision.WithReason(reasonBuilder.String()).(DetectionType)
+ combinedDecisions = append(combinedDecisions, mergedDecision)
}
slog.Info("descheduler: combined decisions", "combined", combinedDecisions)
return combinedDecisions
}
-
-func (p *Pipeline) createDeschedulings(ctx context.Context) error {
- decisionsByStep := p.run()
- if len(decisionsByStep) == 0 {
- slog.Info("descheduler: no decisions made in this run")
- return nil
- }
- slog.Info("descheduler: decisions made", "decisionsByStep", decisionsByStep)
- decisions := p.combine(decisionsByStep)
- var err error
- decisions, err = p.CycleDetector.Filter(ctx, decisions)
- if err != nil {
- slog.Error("descheduler: failed to filter decisions for cycles", "error", err)
- return err
- }
- for _, decision := range decisions {
- // Precaution: If a descheduling for the VM already exists, skip it.
- // The TTL controller will clean up old deschedulings so the vm
- // can be descheduled again later if needed, or we can manually
- // delete the descheduling if we want to deschedule the VM again.
- var existing v1alpha1.Descheduling
- err := p.Get(ctx, client.ObjectKey{Name: decision.VMID}, &existing)
- if err == nil {
- slog.Info("descheduler: descheduling already exists for VM, skipping", "vmId", decision.VMID)
- continue
- }
-
- descheduling := &v1alpha1.Descheduling{}
- descheduling.Name = decision.VMID
- descheduling.Spec.Ref = decision.VMID
- descheduling.Spec.RefType = v1alpha1.DeschedulingSpecVMReferenceNovaServerUUID
- descheduling.Spec.PrevHostType = v1alpha1.DeschedulingSpecHostTypeNovaComputeHostName
- descheduling.Spec.PrevHost = decision.Host
- descheduling.Spec.Reason = decision.Reason
- if err := p.Create(ctx, descheduling); err != nil {
- return err
- }
- slog.Info("descheduler: created descheduling", "vmId", decision.VMID, "host", decision.Host, "reason", decision.Reason)
- }
- return nil
-}
diff --git a/internal/scheduling/lib/detector_pipeline_test.go b/internal/scheduling/lib/detector_pipeline_test.go
new file mode 100644
index 000000000..f5e4e9efe
--- /dev/null
+++ b/internal/scheduling/lib/detector_pipeline_test.go
@@ -0,0 +1,4 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
diff --git a/internal/scheduling/descheduling/nova/plugins/base_test.go b/internal/scheduling/lib/detector_test.go
similarity index 60%
rename from internal/scheduling/descheduling/nova/plugins/base_test.go
rename to internal/scheduling/lib/detector_test.go
index 9b052bbca..51ef00eaa 100644
--- a/internal/scheduling/descheduling/nova/plugins/base_test.go
+++ b/internal/scheduling/lib/detector_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package plugins
+package lib
import (
"testing"
@@ -11,17 +11,28 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-type MockOptions struct {
+type mockDetection struct {
+ resource string
+ host string
+ reason string
+}
+
+func (d mockDetection) GetResource() string { return d.resource }
+func (d mockDetection) GetHost() string { return d.host }
+func (d mockDetection) GetReason() string { return d.reason }
+func (d mockDetection) WithReason(reason string) Detection { d.reason = reason; return d }
+
+type mockDetectorOptions struct {
Option1 string `json:"option1"`
Option2 int `json:"option2"`
}
-func (o MockOptions) Validate() error {
+func (o mockDetectorOptions) Validate() error {
return nil
}
func TestDetector_Init(t *testing.T) {
- step := Detector[MockOptions]{}
+ step := BaseDetector[mockDetectorOptions]{}
cl := fake.NewClientBuilder().Build()
err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
Params: runtime.RawExtension{Raw: []byte(`{
diff --git a/internal/scheduling/lib/filter.go b/internal/scheduling/lib/filter.go
index 7e66d2361..552a6bd2d 100644
--- a/internal/scheduling/lib/filter.go
+++ b/internal/scheduling/lib/filter.go
@@ -11,9 +11,20 @@ import (
)
// Interface for a filter as part of the scheduling pipeline.
-type Filter[RequestType PipelineRequest] interface {
- Step[RequestType]
+type Filter[RequestType FilterWeigherPipelineRequest] interface {
+ FilterWeigherPipelineStep[RequestType]
// Configure the filter and initialize things like a database connection.
Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error
}
+
+// Common base for all steps that provides some functionality
+// that would otherwise be duplicated across all steps.
+type BaseFilter[RequestType FilterWeigherPipelineRequest, Opts FilterWeigherPipelineStepOpts] struct {
+ BaseFilterWeigherPipelineStep[RequestType, Opts]
+}
+
+// Init the filter with the database and options.
+func (s *BaseFilter[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
+ return s.BaseFilterWeigherPipelineStep.Init(ctx, client, step.Params)
+}
diff --git a/internal/scheduling/lib/filter_monitor.go b/internal/scheduling/lib/filter_monitor.go
index b09d4c09a..c60dbe090 100644
--- a/internal/scheduling/lib/filter_monitor.go
+++ b/internal/scheduling/lib/filter_monitor.go
@@ -12,18 +12,18 @@ import (
)
// Wraps a scheduler filter to monitor its execution.
-type FilterMonitor[RequestType PipelineRequest] struct {
+type FilterMonitor[RequestType FilterWeigherPipelineRequest] struct {
// The filter to monitor.
filter Filter[RequestType]
// The monitor tracking the step's execution.
- monitor *StepMonitor[RequestType]
+ monitor *FilterWeigherPipelineStepMonitor[RequestType]
}
// Wrap the given filter with a monitor.
-func monitorFilter[RequestType PipelineRequest](
+func monitorFilter[RequestType FilterWeigherPipelineRequest](
filter Filter[RequestType],
stepName string,
- m PipelineMonitor,
+ m FilterWeigherPipelineMonitor,
) *FilterMonitor[RequestType] {
return &FilterMonitor[RequestType]{
@@ -38,6 +38,6 @@ func (fm *FilterMonitor[RequestType]) Init(ctx context.Context, client client.Cl
}
// Run the filter and observe its execution.
-func (fm *FilterMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (fm *FilterMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
return fm.monitor.RunWrapped(traceLog, request, fm.filter)
}
diff --git a/internal/scheduling/lib/filter_test.go b/internal/scheduling/lib/filter_test.go
index 17098fcc4..a321a74a6 100644
--- a/internal/scheduling/lib/filter_test.go
+++ b/internal/scheduling/lib/filter_test.go
@@ -11,9 +11,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockFilter[RequestType PipelineRequest] struct {
+type mockFilter[RequestType FilterWeigherPipelineRequest] struct {
InitFunc func(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error
- RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
+ RunFunc func(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error)
}
func (m *mockFilter[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.FilterSpec) error {
@@ -22,9 +22,9 @@ func (m *mockFilter[RequestType]) Init(ctx context.Context, client client.Client
}
return m.InitFunc(ctx, client, step)
}
-func (m *mockFilter[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockFilter[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
if m.RunFunc == nil {
- return &StepResult{}, nil
+ return &FilterWeigherPipelineStepResult{}, nil
}
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/filter_validation.go b/internal/scheduling/lib/filter_validation.go
index 1bbb794ae..3117cffe0 100644
--- a/internal/scheduling/lib/filter_validation.go
+++ b/internal/scheduling/lib/filter_validation.go
@@ -13,7 +13,7 @@ import (
)
// Wrapper for scheduler steps that validates them before/after execution.
-type FilterValidator[RequestType PipelineRequest] struct {
+type FilterValidator[RequestType FilterWeigherPipelineRequest] struct {
// The wrapped filter to validate.
Filter Filter[RequestType]
}
@@ -25,12 +25,12 @@ func (s *FilterValidator[RequestType]) Init(ctx context.Context, client client.C
}
// Validate the wrapped filter with the database and options.
-func validateFilter[RequestType PipelineRequest](filter Filter[RequestType]) *FilterValidator[RequestType] {
+func validateFilter[RequestType FilterWeigherPipelineRequest](filter Filter[RequestType]) *FilterValidator[RequestType] {
return &FilterValidator[RequestType]{Filter: filter}
}
// Run the filter and validate what happens.
-func (s *FilterValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *FilterValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
result, err := s.Filter.Run(traceLog, request)
if err != nil {
return nil, err
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
index 10935938a..6ddcbc537 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -17,8 +17,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
+type FilterWeigherPipeline[RequestType FilterWeigherPipelineRequest] interface {
+ // Run the scheduling pipeline with the given request.
+ Run(request RequestType) (v1alpha1.DecisionResult, error)
+}
+
// Pipeline of scheduler steps.
-type filterWeigherPipeline[RequestType PipelineRequest] struct {
+type filterWeigherPipeline[RequestType FilterWeigherPipelineRequest] struct {
// The activation function to use when combining the
// results of the scheduler steps.
ActivationFunction
@@ -33,11 +38,11 @@ type filterWeigherPipeline[RequestType PipelineRequest] struct {
// Multipliers to apply to weigher outputs.
weighersMultipliers map[string]float64
// Monitor to observe the pipeline.
- monitor PipelineMonitor
+ monitor FilterWeigherPipelineMonitor
}
// Create a new pipeline with filters and weighers contained in the configuration.
-func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
+func InitNewFilterWeigherPipeline[RequestType FilterWeigherPipelineRequest](
ctx context.Context,
client client.Client,
name string,
@@ -45,15 +50,15 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
confedFilters []v1alpha1.FilterSpec,
supportedWeighers map[string]func() Weigher[RequestType],
confedWeighers []v1alpha1.WeigherSpec,
- monitor PipelineMonitor,
-) PipelineInitResult[Pipeline[RequestType]] {
+ monitor FilterWeigherPipelineMonitor,
+) PipelineInitResult[FilterWeigherPipeline[RequestType]] {
pipelineMonitor := monitor.SubPipeline(name)
// Ensure there are no overlaps between filter and weigher names.
for filterName := range supportedFilters {
if _, ok := supportedWeighers[filterName]; ok {
- return PipelineInitResult[Pipeline[RequestType]]{
+ return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
CriticalErr: errors.New("step name overlap between filters and weighers: " + filterName),
}
}
@@ -67,7 +72,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("supported:", "filters", maps.Keys(supportedFilters))
makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return PipelineInitResult[Pipeline[RequestType]]{
+ return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
CriticalErr: errors.New("unsupported filter name: " + filterConfig.Name),
}
}
@@ -75,7 +80,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
filter = monitorFilter(filter, filterConfig.Name, pipelineMonitor)
filter = validateFilter(filter)
if err := filter.Init(ctx, client, filterConfig); err != nil {
- return PipelineInitResult[Pipeline[RequestType]]{
+ return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
}
}
@@ -115,7 +120,7 @@ func InitNewFilterWeigherPipeline[RequestType PipelineRequest](
slog.Info("scheduler: added weigher", "name", weigherConfig.Name)
}
- return PipelineInitResult[Pipeline[RequestType]]{
+ return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
NonCriticalErr: nonCriticalErr,
Pipeline: &filterWeigherPipeline[RequestType]{
filtersOrder: filtersOrder,
diff --git a/internal/scheduling/lib/pipeline_monitor.go b/internal/scheduling/lib/filter_weigher_pipeline_monitor.go
similarity index 91%
rename from internal/scheduling/lib/pipeline_monitor.go
rename to internal/scheduling/lib/filter_weigher_pipeline_monitor.go
index 5fd7f48a9..4b8adf94c 100644
--- a/internal/scheduling/lib/pipeline_monitor.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_monitor.go
@@ -8,7 +8,7 @@ import (
)
// Collection of Prometheus metrics to monitor scheduler pipeline
-type PipelineMonitor struct {
+type FilterWeigherPipelineMonitor struct {
// The pipeline name is used to differentiate between different pipelines.
PipelineName string
@@ -33,7 +33,7 @@ type PipelineMonitor struct {
}
// Create a new scheduler monitor and register the necessary Prometheus metrics.
-func NewPipelineMonitor() PipelineMonitor {
+func NewPipelineMonitor() FilterWeigherPipelineMonitor {
buckets := []float64{}
buckets = append(buckets, prometheus.LinearBuckets(0, 1, 10)...)
buckets = append(buckets, prometheus.LinearBuckets(10, 10, 4)...)
@@ -43,7 +43,7 @@ func NewPipelineMonitor() PipelineMonitor {
Help: "From which index of the subject list the subject came from originally.",
Buckets: buckets,
}, []string{"pipeline", "step", "outidx"})
- return PipelineMonitor{
+ return FilterWeigherPipelineMonitor{
stepRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "cortex_scheduler_pipeline_step_run_duration_seconds",
Help: "Duration of scheduler pipeline step run",
@@ -87,14 +87,14 @@ func NewPipelineMonitor() PipelineMonitor {
}
// Get a copied pipeline monitor with the name set, after binding the metrics.
-func (m PipelineMonitor) SubPipeline(name string) PipelineMonitor {
+func (m FilterWeigherPipelineMonitor) SubPipeline(name string) FilterWeigherPipelineMonitor {
cp := m
cp.PipelineName = name
return cp
}
// Observe a scheduler pipeline result: subjects going in, and subjects going out.
-func (m *PipelineMonitor) observePipelineResult(request PipelineRequest, result []string) {
+func (m *FilterWeigherPipelineMonitor) observePipelineResult(request FilterWeigherPipelineRequest, result []string) {
// Observe the number of subjects going into the scheduler pipeline.
if m.subjectNumberInObserver != nil {
m.subjectNumberInObserver.
@@ -115,7 +115,7 @@ func (m *PipelineMonitor) observePipelineResult(request PipelineRequest, result
}
}
-func (m *PipelineMonitor) Describe(ch chan<- *prometheus.Desc) {
+func (m *FilterWeigherPipelineMonitor) Describe(ch chan<- *prometheus.Desc) {
m.stepRunTimer.Describe(ch)
m.stepSubjectWeight.Describe(ch)
m.stepRemovedSubjectsObserver.Describe(ch)
@@ -127,7 +127,7 @@ func (m *PipelineMonitor) Describe(ch chan<- *prometheus.Desc) {
m.requestCounter.Describe(ch)
}
-func (m *PipelineMonitor) Collect(ch chan<- prometheus.Metric) {
+func (m *FilterWeigherPipelineMonitor) Collect(ch chan<- prometheus.Metric) {
m.stepRunTimer.Collect(ch)
m.stepSubjectWeight.Collect(ch)
m.stepRemovedSubjectsObserver.Collect(ch)
diff --git a/internal/scheduling/lib/pipeline_monitor_test.go b/internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go
similarity index 100%
rename from internal/scheduling/lib/pipeline_monitor_test.go
rename to internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go
diff --git a/internal/scheduling/lib/pipeline_request.go b/internal/scheduling/lib/filter_weigher_pipeline_request.go
similarity index 84%
rename from internal/scheduling/lib/pipeline_request.go
rename to internal/scheduling/lib/filter_weigher_pipeline_request.go
index 4c49299bd..38387f6b2 100644
--- a/internal/scheduling/lib/pipeline_request.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_request.go
@@ -5,14 +5,14 @@ package lib
import "log/slog"
-type PipelineRequest interface {
+type FilterWeigherPipelineRequest interface {
// Get the subjects that went in the pipeline.
GetSubjects() []string
// This function can be used by the pipeline to obtain a mutated version
// of the request with only the given subjects remaining. This is helpful
// for steps that filter out subjects. Subjects not included in the map
// are considered as filtered out, and won't be reconsidered in later steps.
- FilterSubjects(includedSubjects map[string]float64) PipelineRequest
+ FilterSubjects(includedSubjects map[string]float64) FilterWeigherPipelineRequest
// Get the weights for the subjects.
GetWeights() map[string]float64
// Get logging args to be used in the step's trace log.
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_request_test.go b/internal/scheduling/lib/filter_weigher_pipeline_request_test.go
new file mode 100644
index 000000000..70752853c
--- /dev/null
+++ b/internal/scheduling/lib/filter_weigher_pipeline_request_test.go
@@ -0,0 +1,29 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import "log/slog"
+
+type mockFilterWeigherPipelineRequest struct {
+ WeightKeys []string
+ TraceLogArgs []slog.Attr
+ Subjects []string
+ Weights map[string]float64
+ Pipeline string
+}
+
+func (m mockFilterWeigherPipelineRequest) GetWeightKeys() []string { return m.WeightKeys }
+func (m mockFilterWeigherPipelineRequest) GetTraceLogArgs() []slog.Attr { return m.TraceLogArgs }
+func (m mockFilterWeigherPipelineRequest) GetSubjects() []string { return m.Subjects }
+func (m mockFilterWeigherPipelineRequest) GetWeights() map[string]float64 { return m.Weights }
+func (m mockFilterWeigherPipelineRequest) GetPipeline() string { return m.Pipeline }
+
+func (m mockFilterWeigherPipelineRequest) FilterSubjects(subjects map[string]float64) FilterWeigherPipelineRequest {
+ filteredSubjects := make([]string, 0, len(subjects))
+ for subject := range subjects {
+ filteredSubjects = append(filteredSubjects, subject)
+ }
+ m.Subjects = filteredSubjects
+ return m
+}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_step.go b/internal/scheduling/lib/filter_weigher_pipeline_step.go
new file mode 100644
index 000000000..81394a2ef
--- /dev/null
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step.go
@@ -0,0 +1,77 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/cobaltcore-dev/cortex/pkg/conf"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Steps can be chained together to form a scheduling pipeline.
+type FilterWeigherPipelineStep[RequestType FilterWeigherPipelineRequest] interface {
+ // Run this step in the scheduling pipeline.
+ //
+ // The request is immutable and modifications are stored in the result.
+ // This allows steps to be run in parallel (e.g. weighers) without passing
+ // mutable state around.
+ //
+ // All hosts that should not be filtered out must be included in the returned
+ // map of activations. I.e., filters implementing this interface should
+ // remove activations by omitting them from the returned map.
+ //
+ // Filters implementing this interface should adjust activation
+ // values in the returned map, including all hosts from the request.
+ //
+ // A traceLog is provided that contains the global request id and should
+ // be used to log the step's execution.
+ Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error)
+}
+
+// Common base for all steps that provides some functionality
+// that would otherwise be duplicated across all steps.
+type BaseFilterWeigherPipelineStep[RequestType FilterWeigherPipelineRequest, Opts FilterWeigherPipelineStepOpts] struct {
+ // Options to pass via yaml to this step.
+ conf.JsonOpts[Opts]
+ // The activation function to use.
+ ActivationFunction
+ // The kubernetes client to use.
+ Client client.Client
+}
+
+// Init the step with the database and options.
+func (s *BaseFilterWeigherPipelineStep[RequestType, Opts]) Init(ctx context.Context, client client.Client, params runtime.RawExtension) error {
+ opts := conf.NewRawOptsBytes(params.Raw)
+ if err := s.Load(opts); err != nil {
+ return err
+ }
+ if err := s.Options.Validate(); err != nil {
+ return err
+ }
+
+ s.Client = client
+ return nil
+}
+
+// Get a default result (no action) for the input weight keys given in the request.
+// Use this to initialize the result before applying filtering/weighing logic.
+func (s *BaseFilterWeigherPipelineStep[RequestType, Opts]) IncludeAllHostsFromRequest(request RequestType) *FilterWeigherPipelineStepResult {
+ activations := make(map[string]float64)
+ for _, subject := range request.GetSubjects() {
+ activations[subject] = s.NoEffect()
+ }
+ stats := make(map[string]FilterWeigherPipelineStepStatistics)
+ return &FilterWeigherPipelineStepResult{Activations: activations, Statistics: stats}
+}
+
+// Get default statistics for the input weight keys given in the request.
+func (s *BaseFilterWeigherPipelineStep[RequestType, Opts]) PrepareStats(request RequestType, unit string) FilterWeigherPipelineStepStatistics {
+ return FilterWeigherPipelineStepStatistics{
+ Unit: unit,
+ Subjects: make(map[string]float64, len(request.GetSubjects())),
+ }
+}
diff --git a/internal/scheduling/lib/step_monitor.go b/internal/scheduling/lib/filter_weigher_pipeline_step_monitor.go
similarity index 94%
rename from internal/scheduling/lib/step_monitor.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_monitor.go
index 3459d5455..c46b4b28d 100644
--- a/internal/scheduling/lib/step_monitor.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_monitor.go
@@ -17,7 +17,7 @@ import (
)
// Wraps a scheduler step to monitor its execution.
-type StepMonitor[RequestType PipelineRequest] struct {
+type FilterWeigherPipelineStepMonitor[RequestType FilterWeigherPipelineRequest] struct {
// Mixin that can be embedded in a step to provide some activation function tooling.
ActivationFunction
@@ -39,7 +39,7 @@ type StepMonitor[RequestType PipelineRequest] struct {
}
// Schedule using the wrapped step and measure the time it takes.
-func monitorStep[RequestType PipelineRequest](stepName string, m PipelineMonitor) *StepMonitor[RequestType] {
+func monitorStep[RequestType FilterWeigherPipelineRequest](stepName string, m FilterWeigherPipelineMonitor) *FilterWeigherPipelineStepMonitor[RequestType] {
var runTimer prometheus.Observer
if m.stepRunTimer != nil {
runTimer = m.stepRunTimer.
@@ -50,7 +50,7 @@ func monitorStep[RequestType PipelineRequest](stepName string, m PipelineMonitor
removedSubjectsObserver = m.stepRemovedSubjectsObserver.
WithLabelValues(m.PipelineName, stepName)
}
- return &StepMonitor[RequestType]{
+ return &FilterWeigherPipelineStepMonitor[RequestType]{
runTimer: runTimer,
stepName: stepName,
pipelineName: m.PipelineName,
@@ -62,11 +62,11 @@ func monitorStep[RequestType PipelineRequest](stepName string, m PipelineMonitor
}
// Run the step and observe its execution.
-func (s *StepMonitor[RequestType]) RunWrapped(
+func (s *FilterWeigherPipelineStepMonitor[RequestType]) RunWrapped(
traceLog *slog.Logger,
request RequestType,
- step Step[RequestType],
-) (*StepResult, error) {
+ step FilterWeigherPipelineStep[RequestType],
+) (*FilterWeigherPipelineStepResult, error) {
if s.runTimer != nil {
timer := prometheus.NewTimer(s.runTimer)
diff --git a/internal/scheduling/lib/step_monitor_test.go b/internal/scheduling/lib/filter_weigher_pipeline_step_monitor_test.go
similarity index 92%
rename from internal/scheduling/lib/step_monitor_test.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_monitor_test.go
index bb05621d2..c26385425 100644
--- a/internal/scheduling/lib/step_monitor_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_monitor_test.go
@@ -21,20 +21,20 @@ func (m *mockObserver) Observe(value float64) {
func TestStepMonitorRun(t *testing.T) {
runTimer := &mockObserver{}
removedSubjectsObserver := &mockObserver{}
- monitor := &StepMonitor[mockPipelineRequest]{
+ monitor := &FilterWeigherPipelineStepMonitor[mockFilterWeigherPipelineRequest]{
stepName: "mock_step",
runTimer: runTimer,
stepSubjectWeight: nil,
removedSubjectsObserver: removedSubjectsObserver,
}
- step := &mockWeigher[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
+ step := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return &FilterWeigherPipelineStepResult{
Activations: map[string]float64{"subject1": 0.1, "subject2": 1.0, "subject3": 0.0},
}, nil
},
}
- request := mockPipelineRequest{
+ request := mockFilterWeigherPipelineRequest{
Subjects: []string{"subject1", "subject2", "subject3"},
Weights: map[string]float64{"subject1": 0.2, "subject2": 0.1, "subject3": 0.0},
}
diff --git a/internal/scheduling/lib/step_opts.go b/internal/scheduling/lib/filter_weigher_pipeline_step_opts.go
similarity index 61%
rename from internal/scheduling/lib/step_opts.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_opts.go
index 3ffa7b44d..1a26fc472 100644
--- a/internal/scheduling/lib/step_opts.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_opts.go
@@ -4,12 +4,12 @@
package lib
// Interface to which step options must conform.
-type StepOpts interface {
+type FilterWeigherPipelineStepOpts interface {
// Validate the options for this step.
Validate() error
}
// Empty step opts conforming to the StepOpts interface (validation always succeeds).
-type EmptyStepOpts struct{}
+type EmptyFilterWeigherPipelineStepOpts struct{}
-func (EmptyStepOpts) Validate() error { return nil }
+func (EmptyFilterWeigherPipelineStepOpts) Validate() error { return nil }
diff --git a/internal/scheduling/lib/step_opts_test.go b/internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go
similarity index 100%
rename from internal/scheduling/lib/step_opts_test.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go
diff --git a/internal/scheduling/lib/result.go b/internal/scheduling/lib/filter_weigher_pipeline_step_result.go
similarity index 82%
rename from internal/scheduling/lib/result.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_result.go
index 0c6340858..6dc3cf8d9 100644
--- a/internal/scheduling/lib/result.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_result.go
@@ -3,7 +3,7 @@
package lib
-type StepResult struct {
+type FilterWeigherPipelineStepResult struct {
// The activations calculated by this step.
Activations map[string]float64
@@ -22,10 +22,10 @@ type StepResult struct {
//
// These statistics are used to display the step's effect on the hosts.
// For example: max cpu contention: before [ 100%, 50%, 40% ], after [ 40%, 50%, 100% ]
- Statistics map[string]StepStatistics
+ Statistics map[string]FilterWeigherPipelineStepStatistics
}
-type StepStatistics struct {
+type FilterWeigherPipelineStepStatistics struct {
// The unit of the statistic.
Unit string
// The subjects and their values.
diff --git a/internal/scheduling/lib/base_step_test.go b/internal/scheduling/lib/filter_weigher_pipeline_step_test.go
similarity index 100%
rename from internal/scheduling/lib/base_step_test.go
rename to internal/scheduling/lib/filter_weigher_pipeline_step_test.go
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
index e56db31ba..25d9c2eaf 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -9,14 +9,19 @@ import (
"testing"
)
+// Mock pipeline type for testing
+type mockPipeline struct {
+ name string
+}
+
func TestPipeline_Run(t *testing.T) {
// Create an instance of the pipeline with a mock step
- pipeline := &filterWeigherPipeline[mockPipelineRequest]{
- filters: map[string]Filter[mockPipelineRequest]{
- "mock_filter": &mockFilter[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ pipeline := &filterWeigherPipeline[mockFilterWeigherPipelineRequest]{
+ filters: map[string]Filter[mockFilterWeigherPipelineRequest]{
+ "mock_filter": &mockFilter[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
// Filter out host3
- return &StepResult{
+ return &FilterWeigherPipelineStepResult{
Activations: map[string]float64{
"host1": 0.0,
"host2": 0.0,
@@ -26,16 +31,16 @@ func TestPipeline_Run(t *testing.T) {
},
},
filtersOrder: []string{"mock_filter"},
- weighers: map[string]Weigher[mockPipelineRequest]{
- "mock_weigher": &mockWeigher[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ weighers: map[string]Weigher[mockFilterWeigherPipelineRequest]{
+ "mock_weigher": &mockWeigher[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
// Assign weights to hosts
activations := map[string]float64{
"host1": 0.5,
"host2": 1.0,
"host3": -0.5,
}
- return &StepResult{
+ return &FilterWeigherPipelineStepResult{
Activations: activations,
}, nil
},
@@ -46,12 +51,12 @@ func TestPipeline_Run(t *testing.T) {
tests := []struct {
name string
- request mockPipelineRequest
+ request mockFilterWeigherPipelineRequest
expectedResult []string
}{
{
name: "Single step pipeline",
- request: mockPipelineRequest{
+ request: mockFilterWeigherPipelineRequest{
Subjects: []string{"host1", "host2", "host3"},
Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
},
@@ -78,7 +83,7 @@ func TestPipeline_Run(t *testing.T) {
}
func TestPipeline_NormalizeNovaWeights(t *testing.T) {
- p := &filterWeigherPipeline[mockPipelineRequest]{}
+ p := &filterWeigherPipeline[mockFilterWeigherPipelineRequest]{}
tests := []struct {
name string
@@ -113,8 +118,8 @@ func TestPipeline_NormalizeNovaWeights(t *testing.T) {
}
func TestPipeline_ApplyStepWeights(t *testing.T) {
- p := &filterWeigherPipeline[mockPipelineRequest]{
- weighers: map[string]Weigher[mockPipelineRequest]{},
+ p := &filterWeigherPipeline[mockFilterWeigherPipelineRequest]{
+ weighers: map[string]Weigher[mockFilterWeigherPipelineRequest]{},
weighersOrder: []string{"step1", "step2"},
}
@@ -154,7 +159,7 @@ func TestPipeline_ApplyStepWeights(t *testing.T) {
}
func TestPipeline_SortHostsByWeights(t *testing.T) {
- p := &filterWeigherPipeline[mockPipelineRequest]{}
+ p := &filterWeigherPipeline[mockFilterWeigherPipelineRequest]{}
tests := []struct {
name string
@@ -185,10 +190,10 @@ func TestPipeline_SortHostsByWeights(t *testing.T) {
}
func TestPipeline_RunFilters(t *testing.T) {
- mockStep := &mockFilter[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
+ mockStep := &mockFilter[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
// Filter out host3
- return &StepResult{
+ return &FilterWeigherPipelineStepResult{
Activations: map[string]float64{
"host1": 0.0,
"host2": 0.0,
@@ -196,16 +201,16 @@ func TestPipeline_RunFilters(t *testing.T) {
}, nil
},
}
- p := &filterWeigherPipeline[mockPipelineRequest]{
+ p := &filterWeigherPipeline[mockFilterWeigherPipelineRequest]{
filtersOrder: []string{
"mock_filter",
},
- filters: map[string]Filter[mockPipelineRequest]{
+ filters: map[string]Filter[mockFilterWeigherPipelineRequest]{
"mock_filter": mockStep,
},
}
- request := mockPipelineRequest{
+ request := mockFilterWeigherPipelineRequest{
Subjects: []string{"host1", "host2"},
Weights: map[string]float64{"host1": 0.0, "host2": 0.0, "host3": 0.0},
}
diff --git a/internal/scheduling/lib/pipeline.go b/internal/scheduling/lib/pipeline.go
deleted file mode 100644
index e6d3d6b86..000000000
--- a/internal/scheduling/lib/pipeline.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
-)
-
-type Pipeline[RequestType PipelineRequest] interface {
- // Run the scheduling pipeline with the given request.
- Run(request RequestType) (v1alpha1.DecisionResult, error)
-}
diff --git a/internal/scheduling/lib/base_pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
similarity index 100%
rename from internal/scheduling/lib/base_pipeline_controller.go
rename to internal/scheduling/lib/pipeline_controller.go
diff --git a/internal/scheduling/lib/base_pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
similarity index 100%
rename from internal/scheduling/lib/base_pipeline_controller_test.go
rename to internal/scheduling/lib/pipeline_controller_test.go
diff --git a/internal/scheduling/lib/pipeline_request_test.go b/internal/scheduling/lib/pipeline_request_test.go
deleted file mode 100644
index 455dcc8ac..000000000
--- a/internal/scheduling/lib/pipeline_request_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import "log/slog"
-
-type mockPipelineRequest struct {
- WeightKeys []string
- TraceLogArgs []slog.Attr
- Subjects []string
- Weights map[string]float64
- Pipeline string
-}
-
-func (m mockPipelineRequest) GetWeightKeys() []string { return m.WeightKeys }
-func (m mockPipelineRequest) GetTraceLogArgs() []slog.Attr { return m.TraceLogArgs }
-func (m mockPipelineRequest) GetSubjects() []string { return m.Subjects }
-func (m mockPipelineRequest) GetWeights() map[string]float64 { return m.Weights }
-func (m mockPipelineRequest) GetPipeline() string { return m.Pipeline }
-
-func (m mockPipelineRequest) FilterSubjects(subjects map[string]float64) PipelineRequest {
- filteredSubjects := make([]string, 0, len(subjects))
- for subject := range subjects {
- filteredSubjects = append(filteredSubjects, subject)
- }
- m.Subjects = filteredSubjects
- return m
-}
diff --git a/internal/scheduling/lib/pipeline_test.go b/internal/scheduling/lib/pipeline_test.go
deleted file mode 100644
index c5360bc88..000000000
--- a/internal/scheduling/lib/pipeline_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-// Mock pipeline type for testing
-type mockPipeline struct {
- name string
-}
diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go
deleted file mode 100644
index 328e1eff9..000000000
--- a/internal/scheduling/lib/step.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright SAP SE
-// SPDX-License-Identifier: Apache-2.0
-
-package lib
-
-import (
- "log/slog"
-)
-
-// Steps can be chained together to form a scheduling pipeline.
-type Step[RequestType PipelineRequest] interface {
- // Run this step in the scheduling pipeline.
- //
- // The request is immutable and modifications are stored in the result.
- // This allows steps to be run in parallel (e.g. weighers) without passing
- // mutable state around.
- //
- // All hosts that should not be filtered out must be included in the returned
- // map of activations. I.e., filters implementing this interface should
- // remove activations by omitting them from the returned map.
- //
- // Filters implementing this interface should adjust activation
- // values in the returned map, including all hosts from the request.
- //
- // A traceLog is provided that contains the global request id and should
- // be used to log the step's execution.
- Run(traceLog *slog.Logger, request RequestType) (*StepResult, error)
-}
diff --git a/internal/scheduling/lib/weigher.go b/internal/scheduling/lib/weigher.go
index dc7aa48d9..ef3d213b3 100644
--- a/internal/scheduling/lib/weigher.go
+++ b/internal/scheduling/lib/weigher.go
@@ -5,15 +5,54 @@ package lib
import (
"context"
+ "errors"
+ "fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Interface for a weigher as part of the scheduling pipeline.
-type Weigher[RequestType PipelineRequest] interface {
- Step[RequestType]
+type Weigher[RequestType FilterWeigherPipelineRequest] interface {
+ FilterWeigherPipelineStep[RequestType]
// Configure the step and initialize things like a database connection.
Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error
}
+
+// Common base for all steps that provides some functionality
+// that would otherwise be duplicated across all steps.
+type BaseWeigher[RequestType FilterWeigherPipelineRequest, Opts FilterWeigherPipelineStepOpts] struct {
+ BaseFilterWeigherPipelineStep[RequestType, Opts]
+}
+
+// Init the weigher with the database and options.
+func (s *BaseWeigher[RequestType, Opts]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
+ return s.BaseFilterWeigherPipelineStep.Init(ctx, client, step.Params)
+}
+
+// Check if all knowledges are ready, and if not, return an error indicating why not.
+func (d *BaseFilterWeigherPipelineStep[RequestType, Opts]) CheckKnowledges(ctx context.Context, kns ...corev1.ObjectReference) error {
+ if d.Client == nil {
+ return errors.New("kubernetes client not initialized")
+ }
+ for _, objRef := range kns {
+ knowledge := &v1alpha1.Knowledge{}
+ if err := d.Client.Get(ctx, client.ObjectKey{
+ Name: objRef.Name,
+ Namespace: objRef.Namespace,
+ }, knowledge); err != nil {
+ return fmt.Errorf("failed to get knowledge %s: %w", objRef.Name, err)
+ }
+ // Check if the knowledge status conditions indicate an error.
+ if meta.IsStatusConditionFalse(knowledge.Status.Conditions, v1alpha1.KnowledgeConditionReady) {
+ return fmt.Errorf("knowledge %s not ready", objRef.Name)
+ }
+ if knowledge.Status.RawLength == 0 {
+ return fmt.Errorf("knowledge %s not ready, no data available", objRef.Name)
+ }
+ }
+ return nil
+}
diff --git a/internal/scheduling/lib/weigher_monitor.go b/internal/scheduling/lib/weigher_monitor.go
index 9838c3325..e777a2e33 100644
--- a/internal/scheduling/lib/weigher_monitor.go
+++ b/internal/scheduling/lib/weigher_monitor.go
@@ -12,18 +12,18 @@ import (
)
// Wraps a scheduler weigher to monitor its execution.
-type WeigherMonitor[RequestType PipelineRequest] struct {
+type WeigherMonitor[RequestType FilterWeigherPipelineRequest] struct {
// The weigher to monitor.
weigher Weigher[RequestType]
// The monitor tracking the step's execution.
- monitor *StepMonitor[RequestType]
+ monitor *FilterWeigherPipelineStepMonitor[RequestType]
}
// Wrap the given weigher with a monitor.
-func monitorWeigher[RequestType PipelineRequest](
+func monitorWeigher[RequestType FilterWeigherPipelineRequest](
weigher Weigher[RequestType],
stepName string,
- m PipelineMonitor,
+ m FilterWeigherPipelineMonitor,
) *WeigherMonitor[RequestType] {
return &WeigherMonitor[RequestType]{
@@ -38,6 +38,6 @@ func (wm *WeigherMonitor[RequestType]) Init(ctx context.Context, client client.C
}
// Run the weigher and observe its execution.
-func (wm *WeigherMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (wm *WeigherMonitor[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
return wm.monitor.RunWrapped(traceLog, request, wm.weigher)
}
diff --git a/internal/scheduling/lib/weigher_test.go b/internal/scheduling/lib/weigher_test.go
index 70f8e4285..2cbe52239 100644
--- a/internal/scheduling/lib/weigher_test.go
+++ b/internal/scheduling/lib/weigher_test.go
@@ -11,9 +11,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockWeigher[RequestType PipelineRequest] struct {
+type mockWeigher[RequestType FilterWeigherPipelineRequest] struct {
InitFunc func(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error
- RunFunc func(traceLog *slog.Logger, request RequestType) (*StepResult, error)
+ RunFunc func(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error)
}
func (m *mockWeigher[RequestType]) Init(ctx context.Context, client client.Client, step v1alpha1.WeigherSpec) error {
@@ -22,9 +22,9 @@ func (m *mockWeigher[RequestType]) Init(ctx context.Context, client client.Clien
}
return m.InitFunc(ctx, client, step)
}
-func (m *mockWeigher[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (m *mockWeigher[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
if m.RunFunc == nil {
- return &StepResult{}, nil
+ return &FilterWeigherPipelineStepResult{}, nil
}
return m.RunFunc(traceLog, request)
}
diff --git a/internal/scheduling/lib/weigher_validation.go b/internal/scheduling/lib/weigher_validation.go
index a86e19ec5..8c398eb44 100644
--- a/internal/scheduling/lib/weigher_validation.go
+++ b/internal/scheduling/lib/weigher_validation.go
@@ -13,7 +13,7 @@ import (
)
// Wrapper for scheduler steps that validates them before/after execution.
-type WeigherValidator[RequestType PipelineRequest] struct {
+type WeigherValidator[RequestType FilterWeigherPipelineRequest] struct {
// The wrapped weigher to validate.
Weigher Weigher[RequestType]
}
@@ -25,12 +25,12 @@ func (s *WeigherValidator[RequestType]) Init(ctx context.Context, client client.
}
// Validate the wrapped weigher with the database and options.
-func validateWeigher[RequestType PipelineRequest](weigher Weigher[RequestType]) *WeigherValidator[RequestType] {
+func validateWeigher[RequestType FilterWeigherPipelineRequest](weigher Weigher[RequestType]) *WeigherValidator[RequestType] {
return &WeigherValidator[RequestType]{Weigher: weigher}
}
// Run the weigher and validate what happens.
-func (s *WeigherValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*StepResult, error) {
+func (s *WeigherValidator[RequestType]) Run(traceLog *slog.Logger, request RequestType) (*FilterWeigherPipelineStepResult, error) {
result, err := s.Weigher.Run(traceLog, request)
if err != nil {
return nil, err
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index d990826a2..f7c31caad 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -10,9 +10,9 @@ import (
)
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
- mockStep := &mockWeigher[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
+ mockStep := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return &FilterWeigherPipelineStepResult{
Activations: map[string]float64{
"host1": 1.0,
"host2": 1.0,
@@ -21,11 +21,11 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
},
}
- request := mockPipelineRequest{
+ request := mockFilterWeigherPipelineRequest{
Subjects: []string{"subject1", "subject2"},
}
- validator := WeigherValidator[mockPipelineRequest]{
+ validator := WeigherValidator[mockFilterWeigherPipelineRequest]{
Weigher: mockStep,
}
@@ -45,9 +45,9 @@ func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
}
func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
- mockStep := &mockWeigher[mockPipelineRequest]{
- RunFunc: func(traceLog *slog.Logger, request mockPipelineRequest) (*StepResult, error) {
- return &StepResult{
+ mockStep := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return &FilterWeigherPipelineStepResult{
Activations: map[string]float64{
"host1": 1.0,
},
@@ -55,11 +55,11 @@ func TestWeigherValidator_Run_HostNumberMismatch(t *testing.T) {
},
}
- request := mockPipelineRequest{
+ request := mockFilterWeigherPipelineRequest{
Subjects: []string{"subject1", "subject2"},
}
- validator := WeigherValidator[mockPipelineRequest]{
+ validator := WeigherValidator[mockFilterWeigherPipelineRequest]{
Weigher: mockStep,
}
From 01f7b801128f0d2dfc0940f943944bfe41a2e3d0 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 09:42:31 +0100
Subject: [PATCH 30/41] Rename cycle detector -> cycle breaker to avoid
confusion with detectors
---
cmd/main.go | 6 ++---
.../{cycle_detector.go => cycle_breaker.go} | 10 ++++-----
...detector_test.go => cycle_breaker_test.go} | 22 +++++++++----------
.../descheduling/nova/pipeline_controller.go | 12 +++++-----
.../nova/pipeline_controller_test.go | 16 +++++++-------
.../{cycle_detector.go => cycle_breaker.go} | 2 +-
internal/scheduling/lib/detector_pipeline.go | 2 +-
7 files changed, 35 insertions(+), 35 deletions(-)
rename internal/scheduling/descheduling/nova/{cycle_detector.go => cycle_breaker.go} (80%)
rename internal/scheduling/descheduling/nova/{cycle_detector_test.go => cycle_breaker_test.go} (89%)
rename internal/scheduling/lib/{cycle_detector.go => cycle_breaker.go} (88%)
diff --git a/cmd/main.go b/cmd/main.go
index 304424520..00d9bb08b 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -312,9 +312,9 @@ func main() {
monitor := schedulinglib.NewDetectorPipelineMonitor()
metrics.Registry.MustRegister(&monitor)
deschedulingsController := &deschedulingnova.DeschedulingsPipelineController{
- Monitor: monitor,
- Conf: config,
- CycleDetector: deschedulingnova.NewCycleDetector(),
+ Monitor: monitor,
+ Conf: config,
+ CycleBreaker: deschedulingnova.NewCycleBreaker(),
}
// Inferred through the base controller.
deschedulingsController.Client = multiclusterClient
diff --git a/internal/scheduling/descheduling/nova/cycle_detector.go b/internal/scheduling/descheduling/nova/cycle_breaker.go
similarity index 80%
rename from internal/scheduling/descheduling/nova/cycle_detector.go
rename to internal/scheduling/descheduling/nova/cycle_breaker.go
index 7bb7405b0..1eba574b3 100644
--- a/internal/scheduling/descheduling/nova/cycle_detector.go
+++ b/internal/scheduling/descheduling/nova/cycle_breaker.go
@@ -12,21 +12,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type cycleDetector struct {
+type cycleBreaker struct {
// Nova API to get needed information for cycle detection.
novaAPI NovaAPI
}
-func NewCycleDetector() lib.CycleDetector[plugins.VMDetection] {
- return &cycleDetector{novaAPI: NewNovaAPI()}
+func NewCycleBreaker() lib.CycleBreaker[plugins.VMDetection] {
+ return &cycleBreaker{novaAPI: NewNovaAPI()}
}
// Initialize the cycle detector.
-func (c *cycleDetector) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (c *cycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return c.novaAPI.Init(ctx, client, conf)
}
-func (c *cycleDetector) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
+func (c *cycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
keep := make(map[string]struct{}, len(decisions))
for _, decision := range decisions {
// Get the migrations for the VM.
diff --git a/internal/scheduling/descheduling/nova/cycle_detector_test.go b/internal/scheduling/descheduling/nova/cycle_breaker_test.go
similarity index 89%
rename from internal/scheduling/descheduling/nova/cycle_detector_test.go
rename to internal/scheduling/descheduling/nova/cycle_breaker_test.go
index c784436c1..f2140f856 100644
--- a/internal/scheduling/descheduling/nova/cycle_detector_test.go
+++ b/internal/scheduling/descheduling/nova/cycle_breaker_test.go
@@ -13,24 +13,24 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockCycleDetectorNovaAPI struct {
+type mockCycleBreakerNovaAPI struct {
migrations map[string][]migration
getError error
}
-func (m *mockCycleDetectorNovaAPI) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (m *mockCycleBreakerNovaAPI) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return nil
}
-func (m *mockCycleDetectorNovaAPI) Get(ctx context.Context, id string) (server, error) {
+func (m *mockCycleBreakerNovaAPI) Get(ctx context.Context, id string) (server, error) {
return server{}, errors.New("not implemented")
}
-func (m *mockCycleDetectorNovaAPI) LiveMigrate(ctx context.Context, id string) error {
+func (m *mockCycleBreakerNovaAPI) LiveMigrate(ctx context.Context, id string) error {
return errors.New("not implemented")
}
-func (m *mockCycleDetectorNovaAPI) GetServerMigrations(ctx context.Context, id string) ([]migration, error) {
+func (m *mockCycleBreakerNovaAPI) GetServerMigrations(ctx context.Context, id string) ([]migration, error) {
if m.getError != nil {
return nil, m.getError
}
@@ -40,7 +40,7 @@ func (m *mockCycleDetectorNovaAPI) GetServerMigrations(ctx context.Context, id s
return []migration{}, nil
}
-func TestCycleDetector_Filter(t *testing.T) {
+func TestCycleBreaker_Filter(t *testing.T) {
tests := []struct {
name string
decisions []plugins.VMDetection
@@ -170,7 +170,7 @@ func TestCycleDetector_Filter(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- mockAPI := &mockCycleDetectorNovaAPI{
+ mockAPI := &mockCycleBreakerNovaAPI{
migrations: tt.migrations,
}
@@ -178,7 +178,7 @@ func TestCycleDetector_Filter(t *testing.T) {
mockAPI.getError = errors.New("API error")
}
- detector := cycleDetector{novaAPI: mockAPI}
+ detector := cycleBreaker{novaAPI: mockAPI}
ctx := context.Background()
result, err := detector.Filter(ctx, tt.decisions)
@@ -227,12 +227,12 @@ func TestCycleDetector_Filter(t *testing.T) {
}
}
-func TestCycleDetector_Filter_EmptyVMDetections(t *testing.T) {
- mockAPI := &mockCycleDetectorNovaAPI{
+func TestCycleBreaker_Filter_EmptyVMDetections(t *testing.T) {
+ mockAPI := &mockCycleBreakerNovaAPI{
migrations: map[string][]migration{},
}
- detector := cycleDetector{novaAPI: mockAPI}
+ detector := cycleBreaker{novaAPI: mockAPI}
ctx := context.Background()
result, err := detector.Filter(ctx, []plugins.VMDetection{})
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/descheduling/nova/pipeline_controller.go
index 6ed8f685d..54a633900 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller.go
@@ -38,7 +38,7 @@ type DeschedulingsPipelineController struct {
// Config for the scheduling operator.
Conf conf.Config
// Cycle detector to avoid descheduling loops.
- CycleDetector lib.CycleDetector[plugins.VMDetection]
+ CycleBreaker lib.CycleBreaker[plugins.VMDetection]
}
// The type of pipeline this controller manages.
@@ -53,9 +53,9 @@ func (c *DeschedulingsPipelineController) InitPipeline(
) lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]] {
pipeline := &lib.DetectorPipeline[plugins.VMDetection]{
- Client: c.Client,
- CycleDetector: c.CycleDetector,
- Monitor: c.Monitor.SubPipeline(p.Name),
+ Client: c.Client,
+ CycleBreaker: c.CycleBreaker,
+ Monitor: c.Monitor.SubPipeline(p.Name),
}
nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
return lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]]{
@@ -88,7 +88,7 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
slog.Info("descheduler: decisions made", "decisionsByStep", decisionsByStep)
decisions := p.Combine(decisionsByStep)
var err error
- decisions, err = p.CycleDetector.Filter(ctx, decisions)
+ decisions, err = p.CycleBreaker.Filter(ctx, decisions)
if err != nil {
slog.Error("descheduler: failed to filter decisions for cycles", "error", err)
time.Sleep(jobloop.DefaultJitter(time.Minute))
@@ -136,7 +136,7 @@ func (c *DeschedulingsPipelineController) SetupWithManager(mgr ctrl.Manager, mcl
c.SchedulingDomain = v1alpha1.SchedulingDomainNova
if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
// Initialize the cycle detector.
- return c.CycleDetector.Init(ctx, mgr.GetClient(), c.Conf)
+ return c.CycleBreaker.Init(ctx, mgr.GetClient(), c.Conf)
})); err != nil {
return err
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
index 3ff81bf44..05a32981f 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/descheduling/nova/pipeline_controller_test.go
@@ -18,13 +18,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-type mockCycleDetector struct{}
+type mockCycleBreaker struct{}
-func (m *mockCycleDetector) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (m *mockCycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return nil
}
-func (m *mockCycleDetector) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
+func (m *mockCycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
return decisions, nil
}
@@ -75,13 +75,13 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
controller := &DeschedulingsPipelineController{
- Monitor: lib.NewDetectorPipelineMonitor(),
- CycleDetector: &mockCycleDetector{},
+ Monitor: lib.NewDetectorPipelineMonitor(),
+ CycleBreaker: &mockCycleBreaker{},
}
pipeline := lib.DetectorPipeline[plugins.VMDetection]{
- CycleDetector: controller.CycleDetector,
- Monitor: controller.Monitor,
+ CycleBreaker: controller.CycleBreaker,
+ Monitor: controller.Monitor,
}
nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]lib.Detector[plugins.VMDetection]{
"mock-step": &mockControllerStep{},
@@ -107,7 +107,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
}
}
- if pipeline.CycleDetector != controller.CycleDetector {
+ if pipeline.CycleBreaker != controller.CycleBreaker {
t.Error("expected pipeline to have cycle detector set")
}
diff --git a/internal/scheduling/lib/cycle_detector.go b/internal/scheduling/lib/cycle_breaker.go
similarity index 88%
rename from internal/scheduling/lib/cycle_detector.go
rename to internal/scheduling/lib/cycle_breaker.go
index 9c9796652..5dbe78143 100644
--- a/internal/scheduling/lib/cycle_detector.go
+++ b/internal/scheduling/lib/cycle_breaker.go
@@ -10,7 +10,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type CycleDetector[DetectionType Detection] interface {
+type CycleBreaker[DetectionType Detection] interface {
// Initialize the cycle detector with needed clients.
Init(ctx context.Context, client client.Client, conf conf.Config) error
// Filter descheduling decisions to avoid cycles.
diff --git a/internal/scheduling/lib/detector_pipeline.go b/internal/scheduling/lib/detector_pipeline.go
index 61b46f04d..2f4de2380 100644
--- a/internal/scheduling/lib/detector_pipeline.go
+++ b/internal/scheduling/lib/detector_pipeline.go
@@ -20,7 +20,7 @@ type DetectorPipeline[DetectionType Detection] struct {
// Kubernetes client to create descheduling resources.
client.Client
// Cycle detector to avoid cycles in descheduling.
- CycleDetector CycleDetector[DetectionType]
+ CycleBreaker CycleBreaker[DetectionType]
// Monitor to use for tracking the pipeline.
Monitor DetectorPipelineMonitor
From e53633176cca69c22f99876efc7a19d7d955161f Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 09:46:55 +0100
Subject: [PATCH 31/41] Split supported_steps ->
supported_[filters|weighers|detectors]
---
.../decisions/cinder/supported_filters.go | 14 +++++++++++++
...pported_steps.go => supported_weighers.go} | 5 -----
...upported_steps.go => supported_filters.go} | 5 -----
.../decisions/machines/supported_weighers.go | 14 +++++++++++++
.../decisions/manila/supported_filters.go | 14 +++++++++++++
...pported_steps.go => supported_weighers.go} | 5 -----
...upported_steps.go => supported_filters.go} | 12 -----------
.../decisions/nova/supported_weighers.go | 21 +++++++++++++++++++
...upported_steps.go => supported_filters.go} | 8 -------
.../decisions/pods/supported_weighers.go | 17 +++++++++++++++
...ported_steps.go => supported_detectors.go} | 0
11 files changed, 80 insertions(+), 35 deletions(-)
create mode 100644 internal/scheduling/decisions/cinder/supported_filters.go
rename internal/scheduling/decisions/cinder/{supported_steps.go => supported_weighers.go} (68%)
rename internal/scheduling/decisions/machines/{supported_steps.go => supported_filters.go} (70%)
create mode 100644 internal/scheduling/decisions/machines/supported_weighers.go
create mode 100644 internal/scheduling/decisions/manila/supported_filters.go
rename internal/scheduling/decisions/manila/{supported_steps.go => supported_weighers.go} (76%)
rename internal/scheduling/decisions/nova/{supported_steps.go => supported_filters.go} (70%)
create mode 100644 internal/scheduling/decisions/nova/supported_weighers.go
rename internal/scheduling/decisions/pods/{supported_steps.go => supported_filters.go} (69%)
create mode 100644 internal/scheduling/decisions/pods/supported_weighers.go
rename internal/scheduling/descheduling/nova/{supported_steps.go => supported_detectors.go} (100%)
diff --git a/internal/scheduling/decisions/cinder/supported_filters.go b/internal/scheduling/decisions/cinder/supported_filters.go
new file mode 100644
index 000000000..f751ee9dd
--- /dev/null
+++ b/internal/scheduling/decisions/cinder/supported_filters.go
@@ -0,0 +1,14 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package cinder
+
+import (
+ api "github.com/cobaltcore-dev/cortex/api/delegation/cinder"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+type CinderFilter = lib.Filter[api.ExternalSchedulerRequest]
+
+// Configuration of filters supported by the cinder scheduling.
+var supportedFilters = map[string]func() CinderFilter{}
diff --git a/internal/scheduling/decisions/cinder/supported_steps.go b/internal/scheduling/decisions/cinder/supported_weighers.go
similarity index 68%
rename from internal/scheduling/decisions/cinder/supported_steps.go
rename to internal/scheduling/decisions/cinder/supported_weighers.go
index 90e5dc95d..cc45cf26e 100644
--- a/internal/scheduling/decisions/cinder/supported_steps.go
+++ b/internal/scheduling/decisions/cinder/supported_weighers.go
@@ -12,8 +12,3 @@ type CinderWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the cinder scheduling.
var supportedWeighers = map[string]func() CinderWeigher{}
-
-type CinderFilter = lib.Filter[api.ExternalSchedulerRequest]
-
-// Configuration of filters supported by the cinder scheduling.
-var supportedFilters = map[string]func() CinderFilter{}
diff --git a/internal/scheduling/decisions/machines/supported_steps.go b/internal/scheduling/decisions/machines/supported_filters.go
similarity index 70%
rename from internal/scheduling/decisions/machines/supported_steps.go
rename to internal/scheduling/decisions/machines/supported_filters.go
index 4e04d64d1..5dcee9a54 100644
--- a/internal/scheduling/decisions/machines/supported_steps.go
+++ b/internal/scheduling/decisions/machines/supported_filters.go
@@ -8,11 +8,6 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type MachineWeigher = lib.Weigher[ironcore.MachinePipelineRequest]
-
-// Configuration of weighers supported by the machine scheduling.
-var supportedWeighers = map[string]func() MachineWeigher{}
-
type MachineFilter = lib.Filter[ironcore.MachinePipelineRequest]
// Configuration of filters supported by the machine scheduling.
diff --git a/internal/scheduling/decisions/machines/supported_weighers.go b/internal/scheduling/decisions/machines/supported_weighers.go
new file mode 100644
index 000000000..329606cc1
--- /dev/null
+++ b/internal/scheduling/decisions/machines/supported_weighers.go
@@ -0,0 +1,14 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package machines
+
+import (
+ "github.com/cobaltcore-dev/cortex/api/delegation/ironcore"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+type MachineWeigher = lib.Weigher[ironcore.MachinePipelineRequest]
+
+// Configuration of weighers supported by the machine scheduling.
+var supportedWeighers = map[string]func() MachineWeigher{}
diff --git a/internal/scheduling/decisions/manila/supported_filters.go b/internal/scheduling/decisions/manila/supported_filters.go
new file mode 100644
index 000000000..ed86e3f5f
--- /dev/null
+++ b/internal/scheduling/decisions/manila/supported_filters.go
@@ -0,0 +1,14 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package manila
+
+import (
+ api "github.com/cobaltcore-dev/cortex/api/delegation/manila"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+type ManilaFilter = lib.Filter[api.ExternalSchedulerRequest]
+
+// Configuration of filters supported by the manila scheduler.
+var supportedFilters = map[string]func() ManilaFilter{}
diff --git a/internal/scheduling/decisions/manila/supported_steps.go b/internal/scheduling/decisions/manila/supported_weighers.go
similarity index 76%
rename from internal/scheduling/decisions/manila/supported_steps.go
rename to internal/scheduling/decisions/manila/supported_weighers.go
index fca819711..ae370cd26 100644
--- a/internal/scheduling/decisions/manila/supported_steps.go
+++ b/internal/scheduling/decisions/manila/supported_weighers.go
@@ -9,11 +9,6 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
-type ManilaFilter = lib.Filter[api.ExternalSchedulerRequest]
-
-// Configuration of filters supported by the manila scheduler.
-var supportedFilters = map[string]func() ManilaFilter{}
-
type ManilaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
// Configuration of weighers supported by the manila scheduler.
diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_filters.go
similarity index 70%
rename from internal/scheduling/decisions/nova/supported_steps.go
rename to internal/scheduling/decisions/nova/supported_filters.go
index 4821e7ba5..17e858714 100644
--- a/internal/scheduling/decisions/nova/supported_steps.go
+++ b/internal/scheduling/decisions/nova/supported_filters.go
@@ -6,7 +6,6 @@ package nova
import (
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova/plugins/filters"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova/plugins/weighers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
@@ -30,14 +29,3 @@ var supportedFilters = map[string]func() NovaFilter{
"filter_live_migratable": func() NovaFilter { return &filters.FilterLiveMigratableStep{} },
"filter_requested_destination": func() NovaFilter { return &filters.FilterRequestedDestinationStep{} },
}
-
-type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
-
-// Configuration of weighers supported by the nova scheduler.
-var supportedWeighers = map[string]func() NovaWeigher{
- "vmware_anti_affinity_noisy_projects": func() NovaWeigher { return &weighers.VMwareAntiAffinityNoisyProjectsStep{} },
- "vmware_avoid_long_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidLongTermContendedHostsStep{} },
- "vmware_avoid_short_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidShortTermContendedHostsStep{} },
- "vmware_hana_binpacking": func() NovaWeigher { return &weighers.VMwareHanaBinpackingStep{} },
- "vmware_general_purpose_balancing": func() NovaWeigher { return &weighers.VMwareGeneralPurposeBalancingStep{} },
-}
diff --git a/internal/scheduling/decisions/nova/supported_weighers.go b/internal/scheduling/decisions/nova/supported_weighers.go
new file mode 100644
index 000000000..ed116a514
--- /dev/null
+++ b/internal/scheduling/decisions/nova/supported_weighers.go
@@ -0,0 +1,21 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package nova
+
+import (
+ api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova/plugins/weighers"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
+
+// Configuration of weighers supported by the nova scheduler.
+var supportedWeighers = map[string]func() NovaWeigher{
+ "vmware_anti_affinity_noisy_projects": func() NovaWeigher { return &weighers.VMwareAntiAffinityNoisyProjectsStep{} },
+ "vmware_avoid_long_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidLongTermContendedHostsStep{} },
+ "vmware_avoid_short_term_contended_hosts": func() NovaWeigher { return &weighers.VMwareAvoidShortTermContendedHostsStep{} },
+ "vmware_hana_binpacking": func() NovaWeigher { return &weighers.VMwareHanaBinpackingStep{} },
+ "vmware_general_purpose_balancing": func() NovaWeigher { return &weighers.VMwareGeneralPurposeBalancingStep{} },
+}
diff --git a/internal/scheduling/decisions/pods/supported_steps.go b/internal/scheduling/decisions/pods/supported_filters.go
similarity index 69%
rename from internal/scheduling/decisions/pods/supported_steps.go
rename to internal/scheduling/decisions/pods/supported_filters.go
index 43c8f1ac2..9d0eb5d43 100644
--- a/internal/scheduling/decisions/pods/supported_steps.go
+++ b/internal/scheduling/decisions/pods/supported_filters.go
@@ -6,7 +6,6 @@ package pods
import (
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/plugins/filters"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/plugins/weighers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
)
@@ -19,10 +18,3 @@ var supportedFilters = map[string]func() PodFilter{
"nodeaffinity": func() PodFilter { return &filters.NodeAffinityFilter{} },
"nodecapacity": func() PodFilter { return &filters.NodeCapacityFilter{} },
}
-
-type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
-
-// Configuration of weighers supported by the pods scheduler.
-var supportedWeighers = map[string]func() PodWeigher{
- "binpack": func() PodWeigher { return &weighers.BinpackingStep{} },
-}
diff --git a/internal/scheduling/decisions/pods/supported_weighers.go b/internal/scheduling/decisions/pods/supported_weighers.go
new file mode 100644
index 000000000..ff0449100
--- /dev/null
+++ b/internal/scheduling/decisions/pods/supported_weighers.go
@@ -0,0 +1,17 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package pods
+
+import (
+ "github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/plugins/weighers"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
+
+// Configuration of weighers supported by the pods scheduler.
+var supportedWeighers = map[string]func() PodWeigher{
+ "binpack": func() PodWeigher { return &weighers.BinpackingStep{} },
+}
diff --git a/internal/scheduling/descheduling/nova/supported_steps.go b/internal/scheduling/descheduling/nova/supported_detectors.go
similarity index 100%
rename from internal/scheduling/descheduling/nova/supported_steps.go
rename to internal/scheduling/descheduling/nova/supported_detectors.go
From 729ecd65e8a19287c18145f0de25d5ed18175c8b Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:05:46 +0100
Subject: [PATCH 32/41] Fuse nova scheduling into scheduling/nova
---
cmd/main.go | 19 +++++++-------
...e_breaker.go => detector_cycle_breaker.go} | 2 +-
internal/scheduling/lib/detector_pipeline.go | 2 +-
.../cleanup.go => nova/decisions_cleanup.go} | 2 +-
.../decisions_cleanup_test.go} | 4 +--
.../deschedulings_cleanup.go} | 12 ++++-----
.../deschedulings_cleanup_test.go} | 16 ++++++------
.../deschedulings_executor.go} | 10 +++----
.../deschedulings_executor_test.go} | 8 +++---
.../detector_cycle_breaker.go} | 12 ++++-----
.../detector_cycle_breaker_test.go} | 24 ++++++++---------
.../detector_pipeline_controller.go} | 26 +++++++++----------
.../detector_pipeline_controller_test.go} | 26 +++++++++----------
.../api.go => nova/external_scheduler_api.go} | 2 +-
.../external_scheduler_api_test.go} | 2 +-
.../filter_weigher_pipeline_controller.go} | 0
...ilter_weigher_pipeline_controller_test.go} | 0
.../{descheduling => }/nova/nova_api.go | 0
.../{descheduling => }/nova/nova_api_test.go | 0
.../detectors}/avoid_high_steal_pct.go | 4 +--
.../detectors}/avoid_high_steal_pct_test.go | 2 +-
.../filters/filter_allowed_projects.go | 0
.../filters/filter_allowed_projects_test.go | 0
.../plugins/filters/filter_capabilities.go | 0
.../filters/filter_capabilities_test.go | 0
.../nova/plugins/filters/filter_correct_az.go | 0
.../plugins/filters/filter_correct_az_test.go | 0
.../filters/filter_external_customer.go | 0
.../filters/filter_external_customer_test.go | 0
.../filters/filter_has_accelerators.go | 0
.../filters/filter_has_accelerators_test.go | 0
.../filters/filter_has_enough_capacity.go | 0
.../filter_has_enough_capacity_test.go | 0
.../filters/filter_has_requested_traits.go | 0
.../filter_has_requested_traits_test.go | 0
.../filters/filter_host_instructions.go | 0
.../filters/filter_host_instructions_test.go | 0
.../filters/filter_instance_group_affinity.go | 0
.../filter_instance_group_affinity_test.go | 0
.../filter_instance_group_anti_affinity.go | 0
...ilter_instance_group_anti_affinity_test.go | 0
.../plugins/filters/filter_live_migratable.go | 0
.../filters/filter_live_migratable_test.go | 0
.../plugins/filters/filter_maintenance.go | 0
.../filters/filter_maintenance_test.go | 0
.../filters/filter_packed_virtqueue.go | 0
.../filters/filter_packed_virtqueue_test.go | 0
.../filters/filter_requested_destination.go | 0
.../filter_requested_destination_test.go | 0
.../filters/filter_status_conditions.go | 0
.../filters/filter_status_conditions_test.go | 0
.../nova/plugins/vm_detection.go | 0
.../vmware_anti_affinity_noisy_projects.go | 0
...mware_anti_affinity_noisy_projects_test.go | 0
.../vmware_avoid_long_term_contended_hosts.go | 0
...re_avoid_long_term_contended_hosts_test.go | 0
...vmware_avoid_short_term_contended_hosts.go | 0
...e_avoid_short_term_contended_hosts_test.go | 0
.../vmware_general_purpose_balancing.go | 0
.../vmware_general_purpose_balancing_test.go | 0
.../weighers/vmware_hana_binpacking.go | 0
.../weighers/vmware_hana_binpacking_test.go | 0
.../nova/supported_detectors.go | 6 ++---
.../{decisions => }/nova/supported_filters.go | 2 +-
.../nova/supported_weighers.go | 2 +-
65 files changed, 91 insertions(+), 92 deletions(-)
rename internal/scheduling/lib/{cycle_breaker.go => detector_cycle_breaker.go} (87%)
rename internal/scheduling/{decisions/nova/cleanup.go => nova/decisions_cleanup.go} (97%)
rename internal/scheduling/{decisions/nova/cleanup_test.go => nova/decisions_cleanup_test.go} (98%)
rename internal/scheduling/{descheduling/nova/cleanup.go => nova/deschedulings_cleanup.go} (88%)
rename internal/scheduling/{descheduling/nova/cleanup_test.go => nova/deschedulings_cleanup_test.go} (96%)
rename internal/scheduling/{descheduling/nova/executor.go => nova/deschedulings_executor.go} (96%)
rename internal/scheduling/{descheduling/nova/executor_test.go => nova/deschedulings_executor_test.go} (98%)
rename internal/scheduling/{descheduling/nova/cycle_breaker.go => nova/detector_cycle_breaker.go} (74%)
rename internal/scheduling/{descheduling/nova/cycle_breaker_test.go => nova/detector_cycle_breaker_test.go} (87%)
rename internal/scheduling/{descheduling/nova/pipeline_controller.go => nova/detector_pipeline_controller.go} (87%)
rename internal/scheduling/{descheduling/nova/pipeline_controller_test.go => nova/detector_pipeline_controller_test.go} (77%)
rename internal/scheduling/{external/nova/api.go => nova/external_scheduler_api.go} (99%)
rename internal/scheduling/{external/nova/api_test.go => nova/external_scheduler_api_test.go} (99%)
rename internal/scheduling/{decisions/nova/pipeline_controller.go => nova/filter_weigher_pipeline_controller.go} (100%)
rename internal/scheduling/{decisions/nova/pipeline_controller_test.go => nova/filter_weigher_pipeline_controller_test.go} (100%)
rename internal/scheduling/{descheduling => }/nova/nova_api.go (100%)
rename internal/scheduling/{descheduling => }/nova/nova_api_test.go (100%)
rename internal/scheduling/{descheduling/nova/plugins/kvm => nova/plugins/detectors}/avoid_high_steal_pct.go (96%)
rename internal/scheduling/{descheduling/nova/plugins/kvm => nova/plugins/detectors}/avoid_high_steal_pct_test.go (99%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_allowed_projects.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_allowed_projects_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_capabilities.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_capabilities_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_correct_az.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_correct_az_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_external_customer.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_external_customer_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_accelerators.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_accelerators_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_enough_capacity.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_enough_capacity_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_requested_traits.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_has_requested_traits_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_host_instructions.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_host_instructions_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_instance_group_affinity.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_instance_group_affinity_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_instance_group_anti_affinity.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_instance_group_anti_affinity_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_live_migratable.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_live_migratable_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_maintenance.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_maintenance_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_packed_virtqueue.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_packed_virtqueue_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_requested_destination.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_requested_destination_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_status_conditions.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/filters/filter_status_conditions_test.go (100%)
rename internal/scheduling/{descheduling => }/nova/plugins/vm_detection.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_general_purpose_balancing.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_general_purpose_balancing_test.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_hana_binpacking.go (100%)
rename internal/scheduling/{decisions => }/nova/plugins/weighers/vmware_hana_binpacking_test.go (100%)
rename internal/scheduling/{descheduling => }/nova/supported_detectors.go (61%)
rename internal/scheduling/{decisions => }/nova/supported_filters.go (95%)
rename internal/scheduling/{decisions => }/nova/supported_weighers.go (91%)
diff --git a/cmd/main.go b/cmd/main.go
index 00d9bb08b..673ff85ec 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -42,16 +42,15 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/explanation"
decisionsmachines "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/machines"
decisionsmanila "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/manila"
- decisionsnova "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova"
decisionpods "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods"
- deschedulingnova "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova"
cindere2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/cinder"
manilae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/manila"
novae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/nova"
cinderexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/cinder"
manilaexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/manila"
- novaexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/nova"
schedulinglib "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
+ decisionsnova "github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/commitments"
reservationscontroller "github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/controller"
"github.com/cobaltcore-dev/cortex/pkg/conf"
@@ -305,16 +304,16 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "DecisionReconciler")
os.Exit(1)
}
- novaexternal.NewAPI(config, decisionController).Init(mux)
+ nova.NewAPI(config, decisionController).Init(mux)
}
if slices.Contains(config.EnabledControllers, "nova-deschedulings-pipeline-controller") {
// Deschedulings controller
monitor := schedulinglib.NewDetectorPipelineMonitor()
metrics.Registry.MustRegister(&monitor)
- deschedulingsController := &deschedulingnova.DeschedulingsPipelineController{
- Monitor: monitor,
- Conf: config,
- CycleBreaker: deschedulingnova.NewCycleBreaker(),
+ deschedulingsController := &nova.DetectorPipelineController{
+ Monitor: monitor,
+ Conf: config,
+ DetectorCycleBreaker: nova.NewDetectorCycleBreaker(),
}
// Inferred through the base controller.
deschedulingsController.Client = multiclusterClient
@@ -324,7 +323,7 @@ func main() {
}
go deschedulingsController.CreateDeschedulingsPeriodically(ctx)
// Deschedulings cleanup on startup
- if err := (&deschedulingnova.Cleanup{
+ if err := (&nova.DeschedulingsCleanup{
Client: multiclusterClient,
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr, multiclusterClient); err != nil {
@@ -512,7 +511,7 @@ func main() {
Interval: time.Hour,
Name: "nova-decisions-cleanup-task",
Run: func(ctx context.Context) error {
- return decisionsnova.Cleanup(ctx, multiclusterClient, config)
+ return nova.DecisionsCleanup(ctx, multiclusterClient, config)
},
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to add nova decisions cleanup task to manager")
diff --git a/internal/scheduling/lib/cycle_breaker.go b/internal/scheduling/lib/detector_cycle_breaker.go
similarity index 87%
rename from internal/scheduling/lib/cycle_breaker.go
rename to internal/scheduling/lib/detector_cycle_breaker.go
index 5dbe78143..1a9d01509 100644
--- a/internal/scheduling/lib/cycle_breaker.go
+++ b/internal/scheduling/lib/detector_cycle_breaker.go
@@ -10,7 +10,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type CycleBreaker[DetectionType Detection] interface {
+type DetectorCycleBreaker[DetectionType Detection] interface {
// Initialize the cycle detector with needed clients.
Init(ctx context.Context, client client.Client, conf conf.Config) error
// Filter descheduling decisions to avoid cycles.
diff --git a/internal/scheduling/lib/detector_pipeline.go b/internal/scheduling/lib/detector_pipeline.go
index 2f4de2380..e7a6c9758 100644
--- a/internal/scheduling/lib/detector_pipeline.go
+++ b/internal/scheduling/lib/detector_pipeline.go
@@ -20,7 +20,7 @@ type DetectorPipeline[DetectionType Detection] struct {
// Kubernetes client to create descheduling resources.
client.Client
// Cycle detector to avoid cycles in descheduling.
- CycleBreaker CycleBreaker[DetectionType]
+ DetectorCycleBreaker DetectorCycleBreaker[DetectionType]
// Monitor to use for tracking the pipeline.
Monitor DetectorPipelineMonitor
diff --git a/internal/scheduling/decisions/nova/cleanup.go b/internal/scheduling/nova/decisions_cleanup.go
similarity index 97%
rename from internal/scheduling/decisions/nova/cleanup.go
rename to internal/scheduling/nova/decisions_cleanup.go
index 5a6524450..57b52355d 100644
--- a/internal/scheduling/decisions/nova/cleanup.go
+++ b/internal/scheduling/nova/decisions_cleanup.go
@@ -20,7 +20,7 @@ import (
)
// Delete all decisions for nova servers that have been deleted.
-func Cleanup(ctx context.Context, client client.Client, conf conf.Config) error {
+func DecisionsCleanup(ctx context.Context, client client.Client, conf conf.Config) error {
var authenticatedHTTP = http.DefaultClient
if conf.SSOSecretRef != nil {
var err error
diff --git a/internal/scheduling/decisions/nova/cleanup_test.go b/internal/scheduling/nova/decisions_cleanup_test.go
similarity index 98%
rename from internal/scheduling/decisions/nova/cleanup_test.go
rename to internal/scheduling/nova/decisions_cleanup_test.go
index 6bfab7a00..414cfe0c4 100644
--- a/internal/scheduling/decisions/nova/cleanup_test.go
+++ b/internal/scheduling/nova/decisions_cleanup_test.go
@@ -342,7 +342,7 @@ func TestCleanupNova(t *testing.T) {
Namespace: "default",
},
}
- err := Cleanup(context.Background(), client, config)
+ err := DecisionsCleanup(context.Background(), client, config)
if tt.expectError && err == nil {
t.Error("Expected error but got none")
@@ -429,7 +429,7 @@ func TestCleanupNovaDecisionsCancel(t *testing.T) {
defer cancel()
// This should exit quickly due to context cancellation
- if err := Cleanup(ctx, client, config); err != nil {
+ if err := DecisionsCleanup(ctx, client, config); err != nil {
if !errors.Is(err, context.DeadlineExceeded) {
t.Errorf("Unexpected error during cleanup: %v", err)
}
diff --git a/internal/scheduling/descheduling/nova/cleanup.go b/internal/scheduling/nova/deschedulings_cleanup.go
similarity index 88%
rename from internal/scheduling/descheduling/nova/cleanup.go
rename to internal/scheduling/nova/deschedulings_cleanup.go
index 46410ae44..bc8d09016 100644
--- a/internal/scheduling/descheduling/nova/cleanup.go
+++ b/internal/scheduling/nova/deschedulings_cleanup.go
@@ -17,10 +17,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
-type CleanupOnStartup struct{ *Cleanup }
+type DeschedulingsCleanupOnStartup struct{ *DeschedulingsCleanup }
// Cleanup all old deschedulings on controller startup.
-func (s *CleanupOnStartup) Start(ctx context.Context) error {
+func (s *DeschedulingsCleanupOnStartup) Start(ctx context.Context) error {
log := logf.FromContext(ctx).WithName("ttl-startup-reconciler")
log.Info("starting descheduling cleanup for existing resources")
var resources v1alpha1.DeschedulingList
@@ -52,14 +52,14 @@ func (s *CleanupOnStartup) Start(ctx context.Context) error {
}
// Removes old deschedulings.
-type Cleanup struct {
+type DeschedulingsCleanup struct {
// Client for the kubernetes API.
client.Client
// Kubernetes scheme to use for the deschedulings.
Scheme *runtime.Scheme
}
-func (r *Cleanup) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (r *DeschedulingsCleanup) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx).WithName("cleanup")
// Fetch the descheduling object
@@ -91,8 +91,8 @@ func (r *Cleanup) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result,
return ctrl.Result{}, nil
}
-func (r *Cleanup) SetupWithManager(mgr ctrl.Manager, mcl *multicluster.Client) error {
- if err := mgr.Add(&CleanupOnStartup{r}); err != nil {
+func (r *DeschedulingsCleanup) SetupWithManager(mgr ctrl.Manager, mcl *multicluster.Client) error {
+ if err := mgr.Add(&DeschedulingsCleanupOnStartup{r}); err != nil {
return err
}
return multicluster.BuildController(mcl, mgr).
diff --git a/internal/scheduling/descheduling/nova/cleanup_test.go b/internal/scheduling/nova/deschedulings_cleanup_test.go
similarity index 96%
rename from internal/scheduling/descheduling/nova/cleanup_test.go
rename to internal/scheduling/nova/deschedulings_cleanup_test.go
index 6458c57a8..cf913442f 100644
--- a/internal/scheduling/descheduling/nova/cleanup_test.go
+++ b/internal/scheduling/nova/deschedulings_cleanup_test.go
@@ -158,7 +158,7 @@ func TestCleanup_Reconcile(t *testing.T) {
WithObjects(tt.descheduling).
Build()
- cleanup := &Cleanup{
+ cleanup := &DeschedulingsCleanup{
Client: fakeClient,
Scheme: scheme,
}
@@ -220,7 +220,7 @@ func TestCleanup_Reconcile_NonexistentResource(t *testing.T) {
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
- cleanup := &Cleanup{
+ cleanup := &DeschedulingsCleanup{
Client: fakeClient,
Scheme: scheme,
}
@@ -305,13 +305,13 @@ func TestCleanupOnStartup_Start(t *testing.T) {
WithObjects(objects...).
Build()
- cleanup := &Cleanup{
+ cleanup := &DeschedulingsCleanup{
Client: fakeClient,
Scheme: scheme,
}
- cleanupOnStartup := &CleanupOnStartup{
- Cleanup: cleanup,
+ cleanupOnStartup := &DeschedulingsCleanupOnStartup{
+ DeschedulingsCleanup: cleanup,
}
ctx := context.Background()
@@ -352,13 +352,13 @@ func TestCleanupOnStartup_Start_EmptyList(t *testing.T) {
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
- cleanup := &Cleanup{
+ cleanup := &DeschedulingsCleanup{
Client: fakeClient,
Scheme: scheme,
}
- cleanupOnStartup := &CleanupOnStartup{
- Cleanup: cleanup,
+ cleanupOnStartup := &DeschedulingsCleanupOnStartup{
+ DeschedulingsCleanup: cleanup,
}
ctx := context.Background()
diff --git a/internal/scheduling/descheduling/nova/executor.go b/internal/scheduling/nova/deschedulings_executor.go
similarity index 96%
rename from internal/scheduling/descheduling/nova/executor.go
rename to internal/scheduling/nova/deschedulings_executor.go
index a0f5579d8..1e1cd17e1 100644
--- a/internal/scheduling/descheduling/nova/executor.go
+++ b/internal/scheduling/nova/deschedulings_executor.go
@@ -9,8 +9,8 @@ import (
"time"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"github.com/cobaltcore-dev/cortex/pkg/multicluster"
@@ -26,7 +26,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
-type Executor struct {
+type DeschedulingsExecutor struct {
// Client for the kubernetes API.
client.Client
// Kubernetes scheme to use for the deschedulings.
@@ -42,7 +42,7 @@ type Executor struct {
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
-func (e *Executor) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (e *DeschedulingsExecutor) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
descheduling := &v1alpha1.Descheduling{}
@@ -253,9 +253,9 @@ func (e *Executor) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result
return ctrl.Result{}, nil
}
-func (s *Executor) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (s *DeschedulingsExecutor) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
return multicluster.BuildController(mcl, mgr).
- Named("cortex-descheduler").
+ Named("cortex-nova-deschedulings-executor").
For(
&v1alpha1.Descheduling{},
// Only schedule machines that have the custom scheduler set.
diff --git a/internal/scheduling/descheduling/nova/executor_test.go b/internal/scheduling/nova/deschedulings_executor_test.go
similarity index 98%
rename from internal/scheduling/descheduling/nova/executor_test.go
rename to internal/scheduling/nova/deschedulings_executor_test.go
index e9ace2b2a..54eace86d 100644
--- a/internal/scheduling/descheduling/nova/executor_test.go
+++ b/internal/scheduling/nova/deschedulings_executor_test.go
@@ -10,8 +10,8 @@ import (
"time"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"k8s.io/apimachinery/pkg/api/meta"
@@ -333,7 +333,7 @@ func TestExecutor_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Descheduling{}).
Build()
- executor := &Executor{
+ executor := &DeschedulingsExecutor{
Client: client,
Scheme: scheme,
NovaAPI: tt.novaAPI,
@@ -405,7 +405,7 @@ func TestExecutor_Reconcile(t *testing.T) {
}
}
-func TestExecutor_ReconcileNotFound(t *testing.T) {
+func TestDeschedulingsExecutor_ReconcileNotFound(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
if err != nil {
@@ -413,7 +413,7 @@ func TestExecutor_ReconcileNotFound(t *testing.T) {
}
client := fake.NewClientBuilder().WithScheme(scheme).Build()
- executor := &Executor{
+ executor := &DeschedulingsExecutor{
Client: client,
Scheme: scheme,
NovaAPI: &mockExecutorNovaAPI{},
diff --git a/internal/scheduling/descheduling/nova/cycle_breaker.go b/internal/scheduling/nova/detector_cycle_breaker.go
similarity index 74%
rename from internal/scheduling/descheduling/nova/cycle_breaker.go
rename to internal/scheduling/nova/detector_cycle_breaker.go
index 1eba574b3..fd3a1bd00 100644
--- a/internal/scheduling/descheduling/nova/cycle_breaker.go
+++ b/internal/scheduling/nova/detector_cycle_breaker.go
@@ -6,27 +6,27 @@ package nova
import (
"context"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type cycleBreaker struct {
+type detectorCycleBreaker struct {
// Nova API to get needed information for cycle detection.
novaAPI NovaAPI
}
-func NewCycleBreaker() lib.CycleBreaker[plugins.VMDetection] {
- return &cycleBreaker{novaAPI: NewNovaAPI()}
+func NewDetectorCycleBreaker() lib.DetectorCycleBreaker[plugins.VMDetection] {
+ return &detectorCycleBreaker{novaAPI: NewNovaAPI()}
}
// Initialize the cycle detector.
-func (c *cycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (c *detectorCycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return c.novaAPI.Init(ctx, client, conf)
}
-func (c *cycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
+func (c *detectorCycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
keep := make(map[string]struct{}, len(decisions))
for _, decision := range decisions {
// Get the migrations for the VM.
diff --git a/internal/scheduling/descheduling/nova/cycle_breaker_test.go b/internal/scheduling/nova/detector_cycle_breaker_test.go
similarity index 87%
rename from internal/scheduling/descheduling/nova/cycle_breaker_test.go
rename to internal/scheduling/nova/detector_cycle_breaker_test.go
index f2140f856..50745cdd0 100644
--- a/internal/scheduling/descheduling/nova/cycle_breaker_test.go
+++ b/internal/scheduling/nova/detector_cycle_breaker_test.go
@@ -8,29 +8,29 @@ import (
"errors"
"testing"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type mockCycleBreakerNovaAPI struct {
+type mockDetectorCycleBreakerNovaAPI struct {
migrations map[string][]migration
getError error
}
-func (m *mockCycleBreakerNovaAPI) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (m *mockDetectorCycleBreakerNovaAPI) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return nil
}
-func (m *mockCycleBreakerNovaAPI) Get(ctx context.Context, id string) (server, error) {
+func (m *mockDetectorCycleBreakerNovaAPI) Get(ctx context.Context, id string) (server, error) {
return server{}, errors.New("not implemented")
}
-func (m *mockCycleBreakerNovaAPI) LiveMigrate(ctx context.Context, id string) error {
+func (m *mockDetectorCycleBreakerNovaAPI) LiveMigrate(ctx context.Context, id string) error {
return errors.New("not implemented")
}
-func (m *mockCycleBreakerNovaAPI) GetServerMigrations(ctx context.Context, id string) ([]migration, error) {
+func (m *mockDetectorCycleBreakerNovaAPI) GetServerMigrations(ctx context.Context, id string) ([]migration, error) {
if m.getError != nil {
return nil, m.getError
}
@@ -40,7 +40,7 @@ func (m *mockCycleBreakerNovaAPI) GetServerMigrations(ctx context.Context, id st
return []migration{}, nil
}
-func TestCycleBreaker_Filter(t *testing.T) {
+func TestDetectorCycleBreaker_Filter(t *testing.T) {
tests := []struct {
name string
decisions []plugins.VMDetection
@@ -170,7 +170,7 @@ func TestCycleBreaker_Filter(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- mockAPI := &mockCycleBreakerNovaAPI{
+ mockAPI := &mockDetectorCycleBreakerNovaAPI{
migrations: tt.migrations,
}
@@ -178,7 +178,7 @@ func TestCycleBreaker_Filter(t *testing.T) {
mockAPI.getError = errors.New("API error")
}
- detector := cycleBreaker{novaAPI: mockAPI}
+ detector := detectorCycleBreaker{novaAPI: mockAPI}
ctx := context.Background()
result, err := detector.Filter(ctx, tt.decisions)
@@ -227,12 +227,12 @@ func TestCycleBreaker_Filter(t *testing.T) {
}
}
-func TestCycleBreaker_Filter_EmptyVMDetections(t *testing.T) {
- mockAPI := &mockCycleBreakerNovaAPI{
+func TestDetectorCycleBreaker_Filter_EmptyVMDetections(t *testing.T) {
+ mockAPI := &mockDetectorCycleBreakerNovaAPI{
migrations: map[string][]migration{},
}
- detector := cycleBreaker{novaAPI: mockAPI}
+ detector := detectorCycleBreaker{novaAPI: mockAPI}
ctx := context.Background()
result, err := detector.Filter(ctx, []plugins.VMDetection{})
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller.go b/internal/scheduling/nova/detector_pipeline_controller.go
similarity index 87%
rename from internal/scheduling/descheduling/nova/pipeline_controller.go
rename to internal/scheduling/nova/detector_pipeline_controller.go
index 54a633900..5b13e62f5 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller.go
+++ b/internal/scheduling/nova/detector_pipeline_controller.go
@@ -10,8 +10,8 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"github.com/cobaltcore-dev/cortex/pkg/multicluster"
"github.com/sapcc/go-bits/jobloop"
@@ -29,7 +29,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DeschedulingsPipelineController struct {
+type DetectorPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[*lib.DetectorPipeline[plugins.VMDetection]]
@@ -38,24 +38,24 @@ type DeschedulingsPipelineController struct {
// Config for the scheduling operator.
Conf conf.Config
// Cycle detector to avoid descheduling loops.
- CycleBreaker lib.CycleBreaker[plugins.VMDetection]
+ DetectorCycleBreaker lib.DetectorCycleBreaker[plugins.VMDetection]
}
// The type of pipeline this controller manages.
-func (c *DeschedulingsPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *DetectorPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeDescheduler
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DeschedulingsPipelineController) InitPipeline(
+func (c *DetectorPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]] {
pipeline := &lib.DetectorPipeline[plugins.VMDetection]{
- Client: c.Client,
- CycleBreaker: c.CycleBreaker,
- Monitor: c.Monitor.SubPipeline(p.Name),
+ Client: c.Client,
+ DetectorCycleBreaker: c.DetectorCycleBreaker,
+ Monitor: c.Monitor.SubPipeline(p.Name),
}
nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
return lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]]{
@@ -65,7 +65,7 @@ func (c *DeschedulingsPipelineController) InitPipeline(
}
}
-func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx context.Context) {
+func (c *DetectorPipelineController) CreateDeschedulingsPeriodically(ctx context.Context) {
for {
select {
case <-ctx.Done():
@@ -88,7 +88,7 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
slog.Info("descheduler: decisions made", "decisionsByStep", decisionsByStep)
decisions := p.Combine(decisionsByStep)
var err error
- decisions, err = p.CycleBreaker.Filter(ctx, decisions)
+ decisions, err = p.DetectorCycleBreaker.Filter(ctx, decisions)
if err != nil {
slog.Error("descheduler: failed to filter decisions for cycles", "error", err)
time.Sleep(jobloop.DefaultJitter(time.Minute))
@@ -126,17 +126,17 @@ func (c *DeschedulingsPipelineController) CreateDeschedulingsPeriodically(ctx co
}
}
-func (c *DeschedulingsPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *DetectorPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// This controller does not reconcile any resources directly.
return ctrl.Result{}, nil
}
-func (c *DeschedulingsPipelineController) SetupWithManager(mgr ctrl.Manager, mcl *multicluster.Client) error {
+func (c *DetectorPipelineController) SetupWithManager(mgr ctrl.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainNova
if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
// Initialize the cycle detector.
- return c.CycleBreaker.Init(ctx, mgr.GetClient(), c.Conf)
+ return c.DetectorCycleBreaker.Init(ctx, mgr.GetClient(), c.Conf)
})); err != nil {
return err
}
diff --git a/internal/scheduling/descheduling/nova/pipeline_controller_test.go b/internal/scheduling/nova/detector_pipeline_controller_test.go
similarity index 77%
rename from internal/scheduling/descheduling/nova/pipeline_controller_test.go
rename to internal/scheduling/nova/detector_pipeline_controller_test.go
index 05a32981f..0aa6952ea 100644
--- a/internal/scheduling/descheduling/nova/pipeline_controller_test.go
+++ b/internal/scheduling/nova/detector_pipeline_controller_test.go
@@ -9,8 +9,8 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
@@ -18,13 +18,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-type mockCycleBreaker struct{}
+type mockDetectorCycleBreaker struct{}
-func (m *mockCycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+func (m *mockDetectorCycleBreaker) Init(ctx context.Context, client client.Client, conf conf.Config) error {
return nil
}
-func (m *mockCycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
+func (m *mockDetectorCycleBreaker) Filter(ctx context.Context, decisions []plugins.VMDetection) ([]plugins.VMDetection, error) {
return decisions, nil
}
@@ -37,7 +37,7 @@ func (m *mockControllerStep) Init(ctx context.Context, client client.Client, ste
return nil
}
-func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
+func TestDetectorPipelineController_InitPipeline(t *testing.T) {
tests := []struct {
name string
steps []v1alpha1.DetectorSpec
@@ -74,14 +74,14 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- controller := &DeschedulingsPipelineController{
- Monitor: lib.NewDetectorPipelineMonitor(),
- CycleBreaker: &mockCycleBreaker{},
+ controller := &DetectorPipelineController{
+ Monitor: lib.NewDetectorPipelineMonitor(),
+ DetectorCycleBreaker: &mockDetectorCycleBreaker{},
}
pipeline := lib.DetectorPipeline[plugins.VMDetection]{
- CycleBreaker: controller.CycleBreaker,
- Monitor: controller.Monitor,
+ DetectorCycleBreaker: controller.DetectorCycleBreaker,
+ Monitor: controller.Monitor,
}
nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]lib.Detector[plugins.VMDetection]{
"mock-step": &mockControllerStep{},
@@ -107,7 +107,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
}
}
- if pipeline.CycleBreaker != controller.CycleBreaker {
+ if pipeline.DetectorCycleBreaker != controller.DetectorCycleBreaker {
t.Error("expected pipeline to have cycle detector set")
}
@@ -118,7 +118,7 @@ func TestDeschedulingsPipelineController_InitPipeline(t *testing.T) {
}
}
-func TestDeschedulingsPipelineController_Reconcile(t *testing.T) {
+func TestDetectorPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
if err != nil {
@@ -127,7 +127,7 @@ func TestDeschedulingsPipelineController_Reconcile(t *testing.T) {
client := fake.NewClientBuilder().WithScheme(scheme).Build()
- controller := &DeschedulingsPipelineController{
+ controller := &DetectorPipelineController{
BasePipelineController: lib.BasePipelineController[*lib.DetectorPipeline[plugins.VMDetection]]{
Client: client,
},
diff --git a/internal/scheduling/external/nova/api.go b/internal/scheduling/nova/external_scheduler_api.go
similarity index 99%
rename from internal/scheduling/external/nova/api.go
rename to internal/scheduling/nova/external_scheduler_api.go
index 36a6a3ca3..26083c481 100644
--- a/internal/scheduling/external/nova/api.go
+++ b/internal/scheduling/nova/external_scheduler_api.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package nova
import (
"bytes"
diff --git a/internal/scheduling/external/nova/api_test.go b/internal/scheduling/nova/external_scheduler_api_test.go
similarity index 99%
rename from internal/scheduling/external/nova/api_test.go
rename to internal/scheduling/nova/external_scheduler_api_test.go
index 41043d496..0f06a47d4 100644
--- a/internal/scheduling/external/nova/api_test.go
+++ b/internal/scheduling/nova/external_scheduler_api_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package nova
import (
"bytes"
diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/nova/filter_weigher_pipeline_controller.go
similarity index 100%
rename from internal/scheduling/decisions/nova/pipeline_controller.go
rename to internal/scheduling/nova/filter_weigher_pipeline_controller.go
diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/pipeline_controller_test.go
rename to internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
diff --git a/internal/scheduling/descheduling/nova/nova_api.go b/internal/scheduling/nova/nova_api.go
similarity index 100%
rename from internal/scheduling/descheduling/nova/nova_api.go
rename to internal/scheduling/nova/nova_api.go
diff --git a/internal/scheduling/descheduling/nova/nova_api_test.go b/internal/scheduling/nova/nova_api_test.go
similarity index 100%
rename from internal/scheduling/descheduling/nova/nova_api_test.go
rename to internal/scheduling/nova/nova_api_test.go
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct.go
similarity index 96%
rename from internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
rename to internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct.go
index 022c610f6..b956fd26c 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct.go
+++ b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package kvm
+package detectors
import (
"context"
@@ -10,8 +10,8 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
similarity index 99%
rename from internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go
rename to internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
index 6da3ab2dc..1e89f875a 100644
--- a/internal/scheduling/descheduling/nova/plugins/kvm/avoid_high_steal_pct_test.go
+++ b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package kvm
+package detectors
import (
"testing"
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/nova/plugins/filters/filter_allowed_projects.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go
rename to internal/scheduling/nova/plugins/filters/filter_allowed_projects.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects_test.go b/internal/scheduling/nova/plugins/filters/filter_allowed_projects_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects_test.go
rename to internal/scheduling/nova/plugins/filters/filter_allowed_projects_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/nova/plugins/filters/filter_capabilities.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go
rename to internal/scheduling/nova/plugins/filters/filter_capabilities.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities_test.go b/internal/scheduling/nova/plugins/filters/filter_capabilities_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_capabilities_test.go
rename to internal/scheduling/nova/plugins/filters/filter_capabilities_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/nova/plugins/filters/filter_correct_az.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go
rename to internal/scheduling/nova/plugins/filters/filter_correct_az.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go b/internal/scheduling/nova/plugins/filters/filter_correct_az_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go
rename to internal/scheduling/nova/plugins/filters/filter_correct_az_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/nova/plugins/filters/filter_external_customer.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go
rename to internal/scheduling/nova/plugins/filters/filter_external_customer.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go b/internal/scheduling/nova/plugins/filters/filter_external_customer_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go
rename to internal/scheduling/nova/plugins/filters/filter_external_customer_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/nova/plugins/filters/filter_has_accelerators.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go
rename to internal/scheduling/nova/plugins/filters/filter_has_accelerators.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go b/internal/scheduling/nova/plugins/filters/filter_has_accelerators_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go
rename to internal/scheduling/nova/plugins/filters/filter_has_accelerators_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go
rename to internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go
rename to internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/nova/plugins/filters/filter_has_requested_traits.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go
rename to internal/scheduling/nova/plugins/filters/filter_has_requested_traits.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go b/internal/scheduling/nova/plugins/filters/filter_has_requested_traits_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go
rename to internal/scheduling/nova/plugins/filters/filter_has_requested_traits_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go b/internal/scheduling/nova/plugins/filters/filter_host_instructions.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions.go
rename to internal/scheduling/nova/plugins/filters/filter_host_instructions.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions_test.go b/internal/scheduling/nova/plugins/filters/filter_host_instructions_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_host_instructions_test.go
rename to internal/scheduling/nova/plugins/filters/filter_host_instructions_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go b/internal/scheduling/nova/plugins/filters/filter_instance_group_affinity.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity.go
rename to internal/scheduling/nova/plugins/filters/filter_instance_group_affinity.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity_test.go b/internal/scheduling/nova/plugins/filters/filter_instance_group_affinity_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_affinity_test.go
rename to internal/scheduling/nova/plugins/filters/filter_instance_group_affinity_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go b/internal/scheduling/nova/plugins/filters/filter_instance_group_anti_affinity.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity.go
rename to internal/scheduling/nova/plugins/filters/filter_instance_group_anti_affinity.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity_test.go b/internal/scheduling/nova/plugins/filters/filter_instance_group_anti_affinity_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_instance_group_anti_affinity_test.go
rename to internal/scheduling/nova/plugins/filters/filter_instance_group_anti_affinity_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go b/internal/scheduling/nova/plugins/filters/filter_live_migratable.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable.go
rename to internal/scheduling/nova/plugins/filters/filter_live_migratable.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go b/internal/scheduling/nova/plugins/filters/filter_live_migratable_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_live_migratable_test.go
rename to internal/scheduling/nova/plugins/filters/filter_live_migratable_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/nova/plugins/filters/filter_maintenance.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go
rename to internal/scheduling/nova/plugins/filters/filter_maintenance.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance_test.go b/internal/scheduling/nova/plugins/filters/filter_maintenance_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_maintenance_test.go
rename to internal/scheduling/nova/plugins/filters/filter_maintenance_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/nova/plugins/filters/filter_packed_virtqueue.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go
rename to internal/scheduling/nova/plugins/filters/filter_packed_virtqueue.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go b/internal/scheduling/nova/plugins/filters/filter_packed_virtqueue_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go
rename to internal/scheduling/nova/plugins/filters/filter_packed_virtqueue_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go b/internal/scheduling/nova/plugins/filters/filter_requested_destination.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination.go
rename to internal/scheduling/nova/plugins/filters/filter_requested_destination.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go b/internal/scheduling/nova/plugins/filters/filter_requested_destination_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_requested_destination_test.go
rename to internal/scheduling/nova/plugins/filters/filter_requested_destination_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/nova/plugins/filters/filter_status_conditions.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go
rename to internal/scheduling/nova/plugins/filters/filter_status_conditions.go
diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions_test.go b/internal/scheduling/nova/plugins/filters/filter_status_conditions_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions_test.go
rename to internal/scheduling/nova/plugins/filters/filter_status_conditions_test.go
diff --git a/internal/scheduling/descheduling/nova/plugins/vm_detection.go b/internal/scheduling/nova/plugins/vm_detection.go
similarity index 100%
rename from internal/scheduling/descheduling/nova/plugins/vm_detection.go
rename to internal/scheduling/nova/plugins/vm_detection.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go b/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
rename to internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go b/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
rename to internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
rename to internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
rename to internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
rename to internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
rename to internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go b/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing.go
rename to internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing_test.go b/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
rename to internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go b/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking.go
rename to internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking.go
diff --git a/internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking_test.go b/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go
similarity index 100%
rename from internal/scheduling/decisions/nova/plugins/weighers/vmware_hana_binpacking_test.go
rename to internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go
diff --git a/internal/scheduling/descheduling/nova/supported_detectors.go b/internal/scheduling/nova/supported_detectors.go
similarity index 61%
rename from internal/scheduling/descheduling/nova/supported_detectors.go
rename to internal/scheduling/nova/supported_detectors.go
index 3d093eb77..680437962 100644
--- a/internal/scheduling/descheduling/nova/supported_detectors.go
+++ b/internal/scheduling/nova/supported_detectors.go
@@ -4,13 +4,13 @@
package nova
import (
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/descheduling/nova/plugins/kvm"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins/detectors"
)
// Configuration of steps supported by the descheduler.
// The steps actually used by the scheduler are defined through the configuration file.
var supportedDetectors = map[string]lib.Detector[plugins.VMDetection]{
- "avoid_high_steal_pct": &kvm.AvoidHighStealPctStep{},
+ "avoid_high_steal_pct": &detectors.AvoidHighStealPctStep{},
}
diff --git a/internal/scheduling/decisions/nova/supported_filters.go b/internal/scheduling/nova/supported_filters.go
similarity index 95%
rename from internal/scheduling/decisions/nova/supported_filters.go
rename to internal/scheduling/nova/supported_filters.go
index 17e858714..2b5554b1f 100644
--- a/internal/scheduling/decisions/nova/supported_filters.go
+++ b/internal/scheduling/nova/supported_filters.go
@@ -5,8 +5,8 @@ package nova
import (
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova/plugins/filters"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins/filters"
)
type NovaFilter = lib.Filter[api.ExternalSchedulerRequest]
diff --git a/internal/scheduling/decisions/nova/supported_weighers.go b/internal/scheduling/nova/supported_weighers.go
similarity index 91%
rename from internal/scheduling/decisions/nova/supported_weighers.go
rename to internal/scheduling/nova/supported_weighers.go
index ed116a514..11bfb28eb 100644
--- a/internal/scheduling/decisions/nova/supported_weighers.go
+++ b/internal/scheduling/nova/supported_weighers.go
@@ -5,8 +5,8 @@ package nova
import (
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/nova/plugins/weighers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins/weighers"
)
type NovaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
From fc2c1cf53e4146ab22ba21b7b701b73357a58ff0 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:12:01 +0100
Subject: [PATCH 33/41] Fuse manila scheduling into scheduling/manila
---
cmd/main.go | 18 ++++++++----------
.../decisions/cinder/pipeline_controller.go | 14 +++++++-------
.../cinder/pipeline_controller_test.go | 12 ++++++------
.../decisions/machines/pipeline_controller.go | 16 ++++++++--------
.../machines/pipeline_controller_test.go | 12 ++++++------
.../decisions/pods/pipeline_controller.go | 16 ++++++++--------
.../decisions/pods/pipeline_controller_test.go | 12 ++++++------
.../cleanup.go => manila/decisions_cleanup.go} | 2 +-
.../decisions_cleanup_test.go} | 4 ++--
.../external_scheduler_api.go} | 2 +-
.../external_scheduler_api_test.go} | 2 +-
.../filter_weigher_pipeline_controller.go} | 14 +++++++-------
...filter_weigher_pipeline_controller_test.go} | 12 ++++++------
.../weighers/netapp_cpu_usage_balancing.go | 0
.../netapp_cpu_usage_balancing_test.go | 0
.../manila/supported_filters.go | 0
.../manila/supported_weighers.go | 2 +-
.../nova/filter_weigher_pipeline_controller.go | 14 +++++++-------
.../filter_weigher_pipeline_controller_test.go | 12 ++++++------
19 files changed, 81 insertions(+), 83 deletions(-)
rename internal/scheduling/{decisions/manila/cleanup.go => manila/decisions_cleanup.go} (97%)
rename internal/scheduling/{decisions/manila/cleanup_test.go => manila/decisions_cleanup_test.go} (98%)
rename internal/scheduling/{external/manila/api.go => manila/external_scheduler_api.go} (99%)
rename internal/scheduling/{external/manila/api_test.go => manila/external_scheduler_api_test.go} (99%)
rename internal/scheduling/{decisions/manila/pipeline_controller.go => manila/filter_weigher_pipeline_controller.go} (90%)
rename internal/scheduling/{decisions/manila/pipeline_controller_test.go => manila/filter_weigher_pipeline_controller_test.go} (97%)
rename internal/scheduling/{decisions => }/manila/plugins/weighers/netapp_cpu_usage_balancing.go (100%)
rename internal/scheduling/{decisions => }/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go (100%)
rename internal/scheduling/{decisions => }/manila/supported_filters.go (100%)
rename internal/scheduling/{decisions => }/manila/supported_weighers.go (84%)
diff --git a/cmd/main.go b/cmd/main.go
index 673ff85ec..65775f875 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -41,16 +41,14 @@ import (
decisionscinder "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/cinder"
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/explanation"
decisionsmachines "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/machines"
- decisionsmanila "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/manila"
decisionpods "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods"
cindere2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/cinder"
manilae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/manila"
novae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/nova"
cinderexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/cinder"
- manilaexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/manila"
schedulinglib "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/manila"
"github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
- decisionsnova "github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/commitments"
reservationscontroller "github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/controller"
"github.com/cobaltcore-dev/cortex/pkg/conf"
@@ -294,7 +292,7 @@ func main() {
metrics.Registry.MustRegister(&pipelineMonitor)
if slices.Contains(config.EnabledControllers, "nova-decisions-pipeline-controller") {
- decisionController := &decisionsnova.DecisionPipelineController{
+ decisionController := &nova.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -332,7 +330,7 @@ func main() {
}
}
if slices.Contains(config.EnabledControllers, "manila-decisions-pipeline-controller") {
- controller := &decisionsmanila.DecisionPipelineController{
+ controller := &manila.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -342,10 +340,10 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "DecisionReconciler")
os.Exit(1)
}
- manilaexternal.NewAPI(config, controller).Init(mux)
+ manila.NewAPI(config, controller).Init(mux)
}
if slices.Contains(config.EnabledControllers, "cinder-decisions-pipeline-controller") {
- controller := &decisionscinder.DecisionPipelineController{
+ controller := &decisionscinder.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -358,7 +356,7 @@ func main() {
cinderexternal.NewAPI(config, controller).Init(mux)
}
if slices.Contains(config.EnabledControllers, "ironcore-decisions-pipeline-controller") {
- controller := &decisionsmachines.DecisionPipelineController{
+ controller := &decisionsmachines.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -370,7 +368,7 @@ func main() {
}
}
if slices.Contains(config.EnabledControllers, "pods-decisions-pipeline-controller") {
- controller := &decisionpods.DecisionPipelineController{
+ controller := &decisionpods.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -525,7 +523,7 @@ func main() {
Interval: time.Hour,
Name: "manila-decisions-cleanup-task",
Run: func(ctx context.Context) error {
- return decisionsmanila.Cleanup(ctx, multiclusterClient, config)
+ return manila.DecisionsCleanup(ctx, multiclusterClient, config)
},
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to add manila decisions cleanup task to manager")
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller.go b/internal/scheduling/decisions/cinder/pipeline_controller.go
index a75989347..b6c456b99 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller.go
@@ -33,7 +33,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DecisionPipelineController struct {
+type FilterWeigherPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
@@ -47,12 +47,12 @@ type DecisionPipelineController struct {
}
// The type of pipeline this controller manages.
-func (c *DecisionPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *FilterWeigherPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeFilterWeigher
}
// Callback executed when kubernetes asks to reconcile a decision resource.
-func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *FilterWeigherPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -72,7 +72,7 @@ func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Req
}
// Process the decision from the API. Should create and return the updated decision.
-func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -111,7 +111,7 @@ func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Conte
return err
}
-func (c *DecisionPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
log := ctrl.LoggerFrom(ctx)
startedAt := time.Now() // So we can measure sync duration.
@@ -141,7 +141,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DecisionPipelineController) InitPipeline(
+func (c *FilterWeigherPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
@@ -154,7 +154,7 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (c *FilterWeigherPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainCinder
if err := mgr.Add(manager.RunnableFunc(c.InitAllPipelines)); err != nil {
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
index cfc42c8bf..444bdcebe 100644
--- a/internal/scheduling/decisions/cinder/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/cinder/pipeline_controller_test.go
@@ -23,7 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func TestDecisionPipelineController_Reconcile(t *testing.T) {
+func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -156,7 +156,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
@@ -220,7 +220,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
}
-func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
+func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -396,7 +396,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
@@ -469,8 +469,8 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
}
}
-func TestDecisionPipelineController_InitPipeline(t *testing.T) {
- controller := &DecisionPipelineController{
+func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
+ controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
}
diff --git a/internal/scheduling/decisions/machines/pipeline_controller.go b/internal/scheduling/decisions/machines/pipeline_controller.go
index 1e6bc712f..f7b4a0134 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller.go
@@ -37,7 +37,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DecisionPipelineController struct {
+type FilterWeigherPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]
@@ -51,11 +51,11 @@ type DecisionPipelineController struct {
}
// The type of pipeline this controller manages.
-func (c *DecisionPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *FilterWeigherPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeFilterWeigher
}
-func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *FilterWeigherPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -75,7 +75,7 @@ func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, nil
}
-func (c *DecisionPipelineController) ProcessNewMachine(ctx context.Context, machine *ironcorev1alpha1.Machine) error {
+func (c *FilterWeigherPipelineController) ProcessNewMachine(ctx context.Context, machine *ironcorev1alpha1.Machine) error {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -132,7 +132,7 @@ func (c *DecisionPipelineController) ProcessNewMachine(ctx context.Context, mach
return err
}
-func (c *DecisionPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
log := ctrl.LoggerFrom(ctx)
startedAt := time.Now() // So we can measure sync duration.
@@ -183,7 +183,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DecisionPipelineController) InitPipeline(
+func (c *FilterWeigherPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]] {
@@ -196,7 +196,7 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) handleMachine() handler.EventHandler {
+func (c *FilterWeigherPipelineController) handleMachine() handler.EventHandler {
return handler.Funcs{
CreateFunc: func(ctx context.Context, evt event.CreateEvent, queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
machine := evt.Object.(*ironcorev1alpha1.Machine)
@@ -236,7 +236,7 @@ func (c *DecisionPipelineController) handleMachine() handler.EventHandler {
}
}
-func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (c *FilterWeigherPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainMachines
if err := mgr.Add(manager.RunnableFunc(c.InitAllPipelines)); err != nil {
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/decisions/machines/pipeline_controller_test.go
index dc35f30d6..fa207f8eb 100644
--- a/internal/scheduling/decisions/machines/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/machines/pipeline_controller_test.go
@@ -21,7 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-func TestDecisionPipelineController_Reconcile(t *testing.T) {
+func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add scheduling scheme: %v", err)
@@ -120,7 +120,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]{
Pipelines: map[string]lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]{
"machines-scheduler": createMockPipeline(),
@@ -204,8 +204,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
}
-func TestDecisionPipelineController_InitPipeline(t *testing.T) {
- controller := &DecisionPipelineController{
+func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
+ controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
}
@@ -271,7 +271,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
}
-func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
+func TestFilterWeigherPipelineController_ProcessNewMachine(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add scheduling scheme: %v", err)
@@ -426,7 +426,7 @@ func TestDecisionPipelineController_ProcessNewMachine(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]]{
Pipelines: map[string]lib.FilterWeigherPipeline[ironcore.MachinePipelineRequest]{},
PipelineConfigs: map[string]v1alpha1.Pipeline{},
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/decisions/pods/pipeline_controller.go
index 0835cdfbc..888fcabcc 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller.go
@@ -36,7 +36,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DecisionPipelineController struct {
+type FilterWeigherPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]
@@ -50,11 +50,11 @@ type DecisionPipelineController struct {
}
// The type of pipeline this controller manages.
-func (c *DecisionPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *FilterWeigherPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeFilterWeigher
}
-func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *FilterWeigherPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -74,7 +74,7 @@ func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, nil
}
-func (c *DecisionPipelineController) ProcessNewPod(ctx context.Context, pod *corev1.Pod) error {
+func (c *FilterWeigherPipelineController) ProcessNewPod(ctx context.Context, pod *corev1.Pod) error {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -131,7 +131,7 @@ func (c *DecisionPipelineController) ProcessNewPod(ctx context.Context, pod *cor
return err
}
-func (c *DecisionPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
log := ctrl.LoggerFrom(ctx)
startedAt := time.Now() // So we can measure sync duration.
@@ -194,7 +194,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DecisionPipelineController) InitPipeline(
+func (c *FilterWeigherPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[lib.FilterWeigherPipeline[pods.PodPipelineRequest]] {
@@ -207,7 +207,7 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) handlePod() handler.EventHandler {
+func (c *FilterWeigherPipelineController) handlePod() handler.EventHandler {
return handler.Funcs{
CreateFunc: func(ctx context.Context, evt event.CreateEvent, queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
pod := evt.Object.(*corev1.Pod)
@@ -247,7 +247,7 @@ func (c *DecisionPipelineController) handlePod() handler.EventHandler {
}
}
-func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (c *FilterWeigherPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainPods
if err := mgr.Add(manager.RunnableFunc(c.InitAllPipelines)); err != nil {
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/decisions/pods/pipeline_controller_test.go
index 6a2253e38..01529e12b 100644
--- a/internal/scheduling/decisions/pods/pipeline_controller_test.go
+++ b/internal/scheduling/decisions/pods/pipeline_controller_test.go
@@ -20,7 +20,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-func TestDecisionPipelineController_Reconcile(t *testing.T) {
+func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add scheduling scheme: %v", err)
@@ -117,7 +117,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]{
Pipelines: map[string]lib.FilterWeigherPipeline[pods.PodPipelineRequest]{
"pods-scheduler": createMockPodPipeline(),
@@ -179,8 +179,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
}
-func TestDecisionPipelineController_InitPipeline(t *testing.T) {
- controller := &DecisionPipelineController{
+func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
+ controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
}
@@ -251,7 +251,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
}
-func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
+func TestFilterWeigherPipelineController_ProcessNewPod(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add scheduling scheme: %v", err)
@@ -406,7 +406,7 @@ func TestDecisionPipelineController_ProcessNewPod(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[pods.PodPipelineRequest]]{
Pipelines: map[string]lib.FilterWeigherPipeline[pods.PodPipelineRequest]{},
PipelineConfigs: map[string]v1alpha1.Pipeline{},
diff --git a/internal/scheduling/decisions/manila/cleanup.go b/internal/scheduling/manila/decisions_cleanup.go
similarity index 97%
rename from internal/scheduling/decisions/manila/cleanup.go
rename to internal/scheduling/manila/decisions_cleanup.go
index 6ad87aa3d..3e0e62f23 100644
--- a/internal/scheduling/decisions/manila/cleanup.go
+++ b/internal/scheduling/manila/decisions_cleanup.go
@@ -22,7 +22,7 @@ import (
)
// Delete all decisions for manila shares that have been deleted.
-func Cleanup(ctx context.Context, client client.Client, conf conf.Config) error {
+func DecisionsCleanup(ctx context.Context, client client.Client, conf conf.Config) error {
var authenticatedHTTP = http.DefaultClient
if conf.SSOSecretRef != nil {
var err error
diff --git a/internal/scheduling/decisions/manila/cleanup_test.go b/internal/scheduling/manila/decisions_cleanup_test.go
similarity index 98%
rename from internal/scheduling/decisions/manila/cleanup_test.go
rename to internal/scheduling/manila/decisions_cleanup_test.go
index 6431b2d78..1786f8d1d 100644
--- a/internal/scheduling/decisions/manila/cleanup_test.go
+++ b/internal/scheduling/manila/decisions_cleanup_test.go
@@ -339,7 +339,7 @@ func TestCleanupManila(t *testing.T) {
Namespace: "default",
},
}
- err := Cleanup(context.Background(), client, config)
+ err := DecisionsCleanup(context.Background(), client, config)
if tt.expectError && err == nil {
t.Error("Expected error but got none")
@@ -427,7 +427,7 @@ func TestCleanupManilaDecisionsCancel(t *testing.T) {
defer cancel()
// This should exit quickly due to context cancellation
- if err := Cleanup(ctx, client, config); err != nil {
+ if err := DecisionsCleanup(ctx, client, config); err != nil {
if !errors.Is(err, context.DeadlineExceeded) {
t.Errorf("Unexpected error during cleanup: %v", err)
}
diff --git a/internal/scheduling/external/manila/api.go b/internal/scheduling/manila/external_scheduler_api.go
similarity index 99%
rename from internal/scheduling/external/manila/api.go
rename to internal/scheduling/manila/external_scheduler_api.go
index 077893118..d5fc74a63 100644
--- a/internal/scheduling/external/manila/api.go
+++ b/internal/scheduling/manila/external_scheduler_api.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package manila
import (
"bytes"
diff --git a/internal/scheduling/external/manila/api_test.go b/internal/scheduling/manila/external_scheduler_api_test.go
similarity index 99%
rename from internal/scheduling/external/manila/api_test.go
rename to internal/scheduling/manila/external_scheduler_api_test.go
index 3339f7281..01f11c6b2 100644
--- a/internal/scheduling/external/manila/api_test.go
+++ b/internal/scheduling/manila/external_scheduler_api_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package manila
import (
"bytes"
diff --git a/internal/scheduling/decisions/manila/pipeline_controller.go b/internal/scheduling/manila/filter_weigher_pipeline_controller.go
similarity index 90%
rename from internal/scheduling/decisions/manila/pipeline_controller.go
rename to internal/scheduling/manila/filter_weigher_pipeline_controller.go
index dde661c89..ceb0320f2 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller.go
+++ b/internal/scheduling/manila/filter_weigher_pipeline_controller.go
@@ -33,7 +33,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DecisionPipelineController struct {
+type FilterWeigherPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
@@ -47,12 +47,12 @@ type DecisionPipelineController struct {
}
// The type of pipeline this controller manages.
-func (c *DecisionPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *FilterWeigherPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeFilterWeigher
}
// Callback executed when kubernetes asks to reconcile a decision resource.
-func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *FilterWeigherPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -72,7 +72,7 @@ func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Req
}
// Process the decision from the API. Should create and return the updated decision.
-func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -111,7 +111,7 @@ func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Conte
return err
}
-func (c *DecisionPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
log := ctrl.LoggerFrom(ctx)
startedAt := time.Now() // So we can measure sync duration.
@@ -141,7 +141,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DecisionPipelineController) InitPipeline(
+func (c *FilterWeigherPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
@@ -154,7 +154,7 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (c *FilterWeigherPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainManila
if err := mgr.Add(manager.RunnableFunc(c.InitAllPipelines)); err != nil {
diff --git a/internal/scheduling/decisions/manila/pipeline_controller_test.go b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
similarity index 97%
rename from internal/scheduling/decisions/manila/pipeline_controller_test.go
rename to internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
index c9cccb041..e6c158c23 100644
--- a/internal/scheduling/decisions/manila/pipeline_controller_test.go
+++ b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func TestDecisionPipelineController_Reconcile(t *testing.T) {
+func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -158,7 +158,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
@@ -217,7 +217,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
}
-func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
+func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -393,7 +393,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
@@ -466,7 +466,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
}
}
-func TestDecisionPipelineController_InitPipeline(t *testing.T) {
+func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -548,7 +548,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
WithObjects(tt.knowledges...).
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
}
controller.Client = client // Through basepipelinecontroller
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go b/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing.go
similarity index 100%
rename from internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing.go
rename to internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing.go
diff --git a/internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go b/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
similarity index 100%
rename from internal/scheduling/decisions/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
rename to internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
diff --git a/internal/scheduling/decisions/manila/supported_filters.go b/internal/scheduling/manila/supported_filters.go
similarity index 100%
rename from internal/scheduling/decisions/manila/supported_filters.go
rename to internal/scheduling/manila/supported_filters.go
diff --git a/internal/scheduling/decisions/manila/supported_weighers.go b/internal/scheduling/manila/supported_weighers.go
similarity index 84%
rename from internal/scheduling/decisions/manila/supported_weighers.go
rename to internal/scheduling/manila/supported_weighers.go
index ae370cd26..3e9a5b6cb 100644
--- a/internal/scheduling/decisions/manila/supported_weighers.go
+++ b/internal/scheduling/manila/supported_weighers.go
@@ -5,8 +5,8 @@ package manila
import (
api "github.com/cobaltcore-dev/cortex/api/delegation/manila"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/manila/plugins/weighers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/manila/plugins/weighers"
)
type ManilaWeigher = lib.Weigher[api.ExternalSchedulerRequest]
diff --git a/internal/scheduling/nova/filter_weigher_pipeline_controller.go b/internal/scheduling/nova/filter_weigher_pipeline_controller.go
index 17a876302..8d4b29c6c 100644
--- a/internal/scheduling/nova/filter_weigher_pipeline_controller.go
+++ b/internal/scheduling/nova/filter_weigher_pipeline_controller.go
@@ -34,7 +34,7 @@ import (
//
// Additionally, the controller watches for pipeline and step changes to
// reconfigure the pipelines as needed.
-type DecisionPipelineController struct {
+type FilterWeigherPipelineController struct {
// Toolbox shared between all pipeline controllers.
lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]
@@ -48,12 +48,12 @@ type DecisionPipelineController struct {
}
// The type of pipeline this controller manages.
-func (c *DecisionPipelineController) PipelineType() v1alpha1.PipelineType {
+func (c *FilterWeigherPipelineController) PipelineType() v1alpha1.PipelineType {
return v1alpha1.PipelineTypeFilterWeigher
}
// Callback executed when kubernetes asks to reconcile a decision resource.
-func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (c *FilterWeigherPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -73,7 +73,7 @@ func (c *DecisionPipelineController) Reconcile(ctx context.Context, req ctrl.Req
}
// Process the decision from the API. Should create and return the updated decision.
-func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) ProcessNewDecisionFromAPI(ctx context.Context, decision *v1alpha1.Decision) error {
c.processMu.Lock()
defer c.processMu.Unlock()
@@ -112,7 +112,7 @@ func (c *DecisionPipelineController) ProcessNewDecisionFromAPI(ctx context.Conte
return err
}
-func (c *DecisionPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
+func (c *FilterWeigherPipelineController) process(ctx context.Context, decision *v1alpha1.Decision) error {
log := ctrl.LoggerFrom(ctx)
startedAt := time.Now() // So we can measure sync duration.
@@ -148,7 +148,7 @@ func (c *DecisionPipelineController) process(ctx context.Context, decision *v1al
}
// The base controller will delegate the pipeline creation down to this method.
-func (c *DecisionPipelineController) InitPipeline(
+func (c *FilterWeigherPipelineController) InitPipeline(
ctx context.Context,
p v1alpha1.Pipeline,
) lib.PipelineInitResult[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]] {
@@ -161,7 +161,7 @@ func (c *DecisionPipelineController) InitPipeline(
)
}
-func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
+func (c *FilterWeigherPipelineController) SetupWithManager(mgr manager.Manager, mcl *multicluster.Client) error {
c.Initializer = c
c.SchedulingDomain = v1alpha1.SchedulingDomainNova
if err := mgr.Add(manager.RunnableFunc(c.InitAllPipelines)); err != nil {
diff --git a/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go b/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
index a0327e36d..5bd41a092 100644
--- a/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
@@ -24,7 +24,7 @@ import (
"github.com/cobaltcore-dev/cortex/pkg/conf"
)
-func TestDecisionPipelineController_Reconcile(t *testing.T) {
+func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -195,7 +195,7 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
@@ -257,8 +257,8 @@ func TestDecisionPipelineController_Reconcile(t *testing.T) {
}
}
-func TestDecisionPipelineController_InitPipeline(t *testing.T) {
- controller := &DecisionPipelineController{
+func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
+ controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
}
@@ -352,7 +352,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) {
}
}
-func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
+func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
t.Fatalf("Failed to add v1alpha1 scheme: %v", err)
@@ -662,7 +662,7 @@ func TestDecisionPipelineController_ProcessNewDecisionFromAPI(t *testing.T) {
WithStatusSubresource(&v1alpha1.Decision{}).
Build()
- controller := &DecisionPipelineController{
+ controller := &FilterWeigherPipelineController{
BasePipelineController: lib.BasePipelineController[lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]]{
Client: client,
Pipelines: make(map[string]lib.FilterWeigherPipeline[api.ExternalSchedulerRequest]),
From 483b5713f138d7bedf33505881d55458fd099c6d Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:19:00 +0100
Subject: [PATCH 34/41] Fuse cinder scheduling into scheduling/cinder
---
cmd/main.go | 9 ++++-----
.../cinder/cleanup.go => cinder/decisions_cleanup.go} | 2 +-
.../cleanup_test.go => cinder/decisions_cleanup_test.go} | 4 ++--
.../cinder/api.go => cinder/external_scheduler_api.go} | 2 +-
.../external_scheduler_api_test.go} | 2 +-
.../filter_weigher_pipeline_controller.go} | 0
.../filter_weigher_pipeline_controller_test.go} | 0
.../{decisions => }/cinder/supported_filters.go | 0
.../{decisions => }/cinder/supported_weighers.go | 0
9 files changed, 9 insertions(+), 10 deletions(-)
rename internal/scheduling/{decisions/cinder/cleanup.go => cinder/decisions_cleanup.go} (97%)
rename internal/scheduling/{decisions/cinder/cleanup_test.go => cinder/decisions_cleanup_test.go} (98%)
rename internal/scheduling/{external/cinder/api.go => cinder/external_scheduler_api.go} (99%)
rename internal/scheduling/{external/cinder/api_test.go => cinder/external_scheduler_api_test.go} (99%)
rename internal/scheduling/{decisions/cinder/pipeline_controller.go => cinder/filter_weigher_pipeline_controller.go} (100%)
rename internal/scheduling/{decisions/cinder/pipeline_controller_test.go => cinder/filter_weigher_pipeline_controller_test.go} (100%)
rename internal/scheduling/{decisions => }/cinder/supported_filters.go (100%)
rename internal/scheduling/{decisions => }/cinder/supported_weighers.go (100%)
diff --git a/cmd/main.go b/cmd/main.go
index 65775f875..6af675699 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -38,14 +38,13 @@ import (
"github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/prometheus"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor"
"github.com/cobaltcore-dev/cortex/internal/knowledge/kpis"
- decisionscinder "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/cinder"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/cinder"
"github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/explanation"
decisionsmachines "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/machines"
decisionpods "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods"
cindere2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/cinder"
manilae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/manila"
novae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/nova"
- cinderexternal "github.com/cobaltcore-dev/cortex/internal/scheduling/external/cinder"
schedulinglib "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/internal/scheduling/manila"
"github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
@@ -343,7 +342,7 @@ func main() {
manila.NewAPI(config, controller).Init(mux)
}
if slices.Contains(config.EnabledControllers, "cinder-decisions-pipeline-controller") {
- controller := &decisionscinder.FilterWeigherPipelineController{
+ controller := &cinder.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -353,7 +352,7 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "DecisionReconciler")
os.Exit(1)
}
- cinderexternal.NewAPI(config, controller).Init(mux)
+ cinder.NewAPI(config, controller).Init(mux)
}
if slices.Contains(config.EnabledControllers, "ironcore-decisions-pipeline-controller") {
controller := &decisionsmachines.FilterWeigherPipelineController{
@@ -537,7 +536,7 @@ func main() {
Interval: time.Hour,
Name: "cinder-decisions-cleanup-task",
Run: func(ctx context.Context) error {
- return decisionscinder.Cleanup(ctx, multiclusterClient, config)
+ return cinder.DecisionsCleanup(ctx, multiclusterClient, config)
},
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to add cinder decisions cleanup task to manager")
diff --git a/internal/scheduling/decisions/cinder/cleanup.go b/internal/scheduling/cinder/decisions_cleanup.go
similarity index 97%
rename from internal/scheduling/decisions/cinder/cleanup.go
rename to internal/scheduling/cinder/decisions_cleanup.go
index b25f66aa4..b4774a450 100644
--- a/internal/scheduling/decisions/cinder/cleanup.go
+++ b/internal/scheduling/cinder/decisions_cleanup.go
@@ -20,7 +20,7 @@ import (
)
// Delete all decisions for cinder volumes that have been deleted.
-func Cleanup(ctx context.Context, client client.Client, conf conf.Config) error {
+func DecisionsCleanup(ctx context.Context, client client.Client, conf conf.Config) error {
var authenticatedHTTP = http.DefaultClient
if conf.SSOSecretRef != nil {
var err error
diff --git a/internal/scheduling/decisions/cinder/cleanup_test.go b/internal/scheduling/cinder/decisions_cleanup_test.go
similarity index 98%
rename from internal/scheduling/decisions/cinder/cleanup_test.go
rename to internal/scheduling/cinder/decisions_cleanup_test.go
index 778f508e6..be3a9dbf5 100644
--- a/internal/scheduling/decisions/cinder/cleanup_test.go
+++ b/internal/scheduling/cinder/decisions_cleanup_test.go
@@ -289,7 +289,7 @@ func TestCleanupCinder(t *testing.T) {
Namespace: "default",
},
}
- err := Cleanup(context.Background(), client, config)
+ err := DecisionsCleanup(context.Background(), client, config)
if tt.expectError && err == nil {
t.Error("Expected error but got none")
@@ -377,7 +377,7 @@ func TestCleanupCinderDecisionsCancel(t *testing.T) {
defer cancel()
// This should exit quickly due to context cancellation
- if err := Cleanup(ctx, client, config); err != nil {
+ if err := DecisionsCleanup(ctx, client, config); err != nil {
if !errors.Is(err, context.DeadlineExceeded) {
t.Errorf("Unexpected error during cleanup: %v", err)
}
diff --git a/internal/scheduling/external/cinder/api.go b/internal/scheduling/cinder/external_scheduler_api.go
similarity index 99%
rename from internal/scheduling/external/cinder/api.go
rename to internal/scheduling/cinder/external_scheduler_api.go
index 00aa8becf..3df397a68 100644
--- a/internal/scheduling/external/cinder/api.go
+++ b/internal/scheduling/cinder/external_scheduler_api.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package cinder
import (
"bytes"
diff --git a/internal/scheduling/external/cinder/api_test.go b/internal/scheduling/cinder/external_scheduler_api_test.go
similarity index 99%
rename from internal/scheduling/external/cinder/api_test.go
rename to internal/scheduling/cinder/external_scheduler_api_test.go
index f6f6c253d..5ad6d793e 100644
--- a/internal/scheduling/external/cinder/api_test.go
+++ b/internal/scheduling/cinder/external_scheduler_api_test.go
@@ -1,7 +1,7 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0
-package http
+package cinder
import (
"bytes"
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller.go b/internal/scheduling/cinder/filter_weigher_pipeline_controller.go
similarity index 100%
rename from internal/scheduling/decisions/cinder/pipeline_controller.go
rename to internal/scheduling/cinder/filter_weigher_pipeline_controller.go
diff --git a/internal/scheduling/decisions/cinder/pipeline_controller_test.go b/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
similarity index 100%
rename from internal/scheduling/decisions/cinder/pipeline_controller_test.go
rename to internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
diff --git a/internal/scheduling/decisions/cinder/supported_filters.go b/internal/scheduling/cinder/supported_filters.go
similarity index 100%
rename from internal/scheduling/decisions/cinder/supported_filters.go
rename to internal/scheduling/cinder/supported_filters.go
diff --git a/internal/scheduling/decisions/cinder/supported_weighers.go b/internal/scheduling/cinder/supported_weighers.go
similarity index 100%
rename from internal/scheduling/decisions/cinder/supported_weighers.go
rename to internal/scheduling/cinder/supported_weighers.go
From b44972147014eef1f5506cd39af3c9592ed95eb0 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:22:56 +0100
Subject: [PATCH 35/41] machines + pods + explanation
---
cmd/main.go | 10 +++++-----
.../{decisions => }/explanation/controller.go | 0
.../{decisions => }/explanation/controller_test.go | 0
.../{decisions => }/explanation/explainer.go | 0
.../{decisions => }/explanation/explainer_test.go | 0
.../{decisions => }/explanation/templates.go | 0
.../scheduling/{decisions => }/explanation/types.go | 0
.../filter_weigher_pipeline_controller.go} | 0
.../filter_weigher_pipeline_controller_test.go} | 0
internal/scheduling/{decisions => }/machines/noop.go | 0
.../scheduling/{decisions => }/machines/noop_test.go | 0
.../{decisions => }/machines/supported_filters.go | 0
.../{decisions => }/machines/supported_weighers.go | 0
.../filter_weigher_pipeline_controller.go} | 0
.../filter_weigher_pipeline_controller_test.go} | 0
.../{decisions => }/pods/helpers/resources.go | 0
.../{decisions => }/pods/helpers/resources_test.go | 0
.../pods/plugins/filters/filter_node_affinity.go | 0
.../pods/plugins/filters/filter_node_affinity_test.go | 0
.../pods/plugins/filters/filter_node_available.go | 0
.../pods/plugins/filters/filter_node_available_test.go | 0
.../pods/plugins/filters/filter_node_capacity.go | 2 +-
.../pods/plugins/filters/filter_node_capacity_test.go | 0
.../pods/plugins/filters/filter_noop.go | 0
.../pods/plugins/filters/filter_noop_test.go | 0
.../pods/plugins/filters/filter_taint.go | 0
.../pods/plugins/filters/filter_taint_test.go | 0
.../{decisions => }/pods/plugins/weighers/binpack.go | 2 +-
.../pods/plugins/weighers/binpack_test.go | 0
.../{decisions => }/pods/supported_filters.go | 2 +-
.../{decisions => }/pods/supported_weighers.go | 2 +-
31 files changed, 9 insertions(+), 9 deletions(-)
rename internal/scheduling/{decisions => }/explanation/controller.go (100%)
rename internal/scheduling/{decisions => }/explanation/controller_test.go (100%)
rename internal/scheduling/{decisions => }/explanation/explainer.go (100%)
rename internal/scheduling/{decisions => }/explanation/explainer_test.go (100%)
rename internal/scheduling/{decisions => }/explanation/templates.go (100%)
rename internal/scheduling/{decisions => }/explanation/types.go (100%)
rename internal/scheduling/{decisions/machines/pipeline_controller.go => machines/filter_weigher_pipeline_controller.go} (100%)
rename internal/scheduling/{decisions/machines/pipeline_controller_test.go => machines/filter_weigher_pipeline_controller_test.go} (100%)
rename internal/scheduling/{decisions => }/machines/noop.go (100%)
rename internal/scheduling/{decisions => }/machines/noop_test.go (100%)
rename internal/scheduling/{decisions => }/machines/supported_filters.go (100%)
rename internal/scheduling/{decisions => }/machines/supported_weighers.go (100%)
rename internal/scheduling/{decisions/pods/pipeline_controller.go => pods/filter_weigher_pipeline_controller.go} (100%)
rename internal/scheduling/{decisions/pods/pipeline_controller_test.go => pods/filter_weigher_pipeline_controller_test.go} (100%)
rename internal/scheduling/{decisions => }/pods/helpers/resources.go (100%)
rename internal/scheduling/{decisions => }/pods/helpers/resources_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_affinity.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_affinity_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_available.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_available_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_capacity.go (94%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_node_capacity_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_noop.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_noop_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_taint.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/filters/filter_taint_test.go (100%)
rename internal/scheduling/{decisions => }/pods/plugins/weighers/binpack.go (96%)
rename internal/scheduling/{decisions => }/pods/plugins/weighers/binpack_test.go (100%)
rename internal/scheduling/{decisions => }/pods/supported_filters.go (88%)
rename internal/scheduling/{decisions => }/pods/supported_weighers.go (83%)
diff --git a/cmd/main.go b/cmd/main.go
index 6af675699..1b00b4e38 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -39,15 +39,15 @@ import (
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor"
"github.com/cobaltcore-dev/cortex/internal/knowledge/kpis"
"github.com/cobaltcore-dev/cortex/internal/scheduling/cinder"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/explanation"
- decisionsmachines "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/machines"
- decisionpods "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods"
cindere2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/cinder"
manilae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/manila"
novae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/nova"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/explanation"
schedulinglib "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/machines"
"github.com/cobaltcore-dev/cortex/internal/scheduling/manila"
"github.com/cobaltcore-dev/cortex/internal/scheduling/nova"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/pods"
"github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/commitments"
reservationscontroller "github.com/cobaltcore-dev/cortex/internal/scheduling/reservations/controller"
"github.com/cobaltcore-dev/cortex/pkg/conf"
@@ -355,7 +355,7 @@ func main() {
cinder.NewAPI(config, controller).Init(mux)
}
if slices.Contains(config.EnabledControllers, "ironcore-decisions-pipeline-controller") {
- controller := &decisionsmachines.FilterWeigherPipelineController{
+ controller := &machines.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
@@ -367,7 +367,7 @@ func main() {
}
}
if slices.Contains(config.EnabledControllers, "pods-decisions-pipeline-controller") {
- controller := &decisionpods.FilterWeigherPipelineController{
+ controller := &pods.FilterWeigherPipelineController{
Monitor: pipelineMonitor,
Conf: config,
}
diff --git a/internal/scheduling/decisions/explanation/controller.go b/internal/scheduling/explanation/controller.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/controller.go
rename to internal/scheduling/explanation/controller.go
diff --git a/internal/scheduling/decisions/explanation/controller_test.go b/internal/scheduling/explanation/controller_test.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/controller_test.go
rename to internal/scheduling/explanation/controller_test.go
diff --git a/internal/scheduling/decisions/explanation/explainer.go b/internal/scheduling/explanation/explainer.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/explainer.go
rename to internal/scheduling/explanation/explainer.go
diff --git a/internal/scheduling/decisions/explanation/explainer_test.go b/internal/scheduling/explanation/explainer_test.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/explainer_test.go
rename to internal/scheduling/explanation/explainer_test.go
diff --git a/internal/scheduling/decisions/explanation/templates.go b/internal/scheduling/explanation/templates.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/templates.go
rename to internal/scheduling/explanation/templates.go
diff --git a/internal/scheduling/decisions/explanation/types.go b/internal/scheduling/explanation/types.go
similarity index 100%
rename from internal/scheduling/decisions/explanation/types.go
rename to internal/scheduling/explanation/types.go
diff --git a/internal/scheduling/decisions/machines/pipeline_controller.go b/internal/scheduling/machines/filter_weigher_pipeline_controller.go
similarity index 100%
rename from internal/scheduling/decisions/machines/pipeline_controller.go
rename to internal/scheduling/machines/filter_weigher_pipeline_controller.go
diff --git a/internal/scheduling/decisions/machines/pipeline_controller_test.go b/internal/scheduling/machines/filter_weigher_pipeline_controller_test.go
similarity index 100%
rename from internal/scheduling/decisions/machines/pipeline_controller_test.go
rename to internal/scheduling/machines/filter_weigher_pipeline_controller_test.go
diff --git a/internal/scheduling/decisions/machines/noop.go b/internal/scheduling/machines/noop.go
similarity index 100%
rename from internal/scheduling/decisions/machines/noop.go
rename to internal/scheduling/machines/noop.go
diff --git a/internal/scheduling/decisions/machines/noop_test.go b/internal/scheduling/machines/noop_test.go
similarity index 100%
rename from internal/scheduling/decisions/machines/noop_test.go
rename to internal/scheduling/machines/noop_test.go
diff --git a/internal/scheduling/decisions/machines/supported_filters.go b/internal/scheduling/machines/supported_filters.go
similarity index 100%
rename from internal/scheduling/decisions/machines/supported_filters.go
rename to internal/scheduling/machines/supported_filters.go
diff --git a/internal/scheduling/decisions/machines/supported_weighers.go b/internal/scheduling/machines/supported_weighers.go
similarity index 100%
rename from internal/scheduling/decisions/machines/supported_weighers.go
rename to internal/scheduling/machines/supported_weighers.go
diff --git a/internal/scheduling/decisions/pods/pipeline_controller.go b/internal/scheduling/pods/filter_weigher_pipeline_controller.go
similarity index 100%
rename from internal/scheduling/decisions/pods/pipeline_controller.go
rename to internal/scheduling/pods/filter_weigher_pipeline_controller.go
diff --git a/internal/scheduling/decisions/pods/pipeline_controller_test.go b/internal/scheduling/pods/filter_weigher_pipeline_controller_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/pipeline_controller_test.go
rename to internal/scheduling/pods/filter_weigher_pipeline_controller_test.go
diff --git a/internal/scheduling/decisions/pods/helpers/resources.go b/internal/scheduling/pods/helpers/resources.go
similarity index 100%
rename from internal/scheduling/decisions/pods/helpers/resources.go
rename to internal/scheduling/pods/helpers/resources.go
diff --git a/internal/scheduling/decisions/pods/helpers/resources_test.go b/internal/scheduling/pods/helpers/resources_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/helpers/resources_test.go
rename to internal/scheduling/pods/helpers/resources_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go b/internal/scheduling/pods/plugins/filters/filter_node_affinity.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity.go
rename to internal/scheduling/pods/plugins/filters/filter_node_affinity.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity_test.go b/internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_affinity_test.go
rename to internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go b/internal/scheduling/pods/plugins/filters/filter_node_available.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_available.go
rename to internal/scheduling/pods/plugins/filters/filter_node_available.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_available_test.go b/internal/scheduling/pods/plugins/filters/filter_node_available_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_available_test.go
rename to internal/scheduling/pods/plugins/filters/filter_node_available_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go b/internal/scheduling/pods/plugins/filters/filter_node_capacity.go
similarity index 94%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
rename to internal/scheduling/pods/plugins/filters/filter_node_capacity.go
index 2e412f593..f148aaecf 100644
--- a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity.go
+++ b/internal/scheduling/pods/plugins/filters/filter_node_capacity.go
@@ -9,8 +9,8 @@ import (
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/helpers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/pods/helpers"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity_test.go b/internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_node_capacity_test.go
rename to internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop.go b/internal/scheduling/pods/plugins/filters/filter_noop.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_noop.go
rename to internal/scheduling/pods/plugins/filters/filter_noop.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_noop_test.go b/internal/scheduling/pods/plugins/filters/filter_noop_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_noop_test.go
rename to internal/scheduling/pods/plugins/filters/filter_noop_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint.go b/internal/scheduling/pods/plugins/filters/filter_taint.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_taint.go
rename to internal/scheduling/pods/plugins/filters/filter_taint.go
diff --git a/internal/scheduling/decisions/pods/plugins/filters/filter_taint_test.go b/internal/scheduling/pods/plugins/filters/filter_taint_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/filters/filter_taint_test.go
rename to internal/scheduling/pods/plugins/filters/filter_taint_test.go
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go b/internal/scheduling/pods/plugins/weighers/binpack.go
similarity index 96%
rename from internal/scheduling/decisions/pods/plugins/weighers/binpack.go
rename to internal/scheduling/pods/plugins/weighers/binpack.go
index 07bd7d904..62a345694 100644
--- a/internal/scheduling/decisions/pods/plugins/weighers/binpack.go
+++ b/internal/scheduling/pods/plugins/weighers/binpack.go
@@ -9,8 +9,8 @@ import (
"math"
api "github.com/cobaltcore-dev/cortex/api/delegation/pods"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/helpers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/pods/helpers"
corev1 "k8s.io/api/core/v1"
)
diff --git a/internal/scheduling/decisions/pods/plugins/weighers/binpack_test.go b/internal/scheduling/pods/plugins/weighers/binpack_test.go
similarity index 100%
rename from internal/scheduling/decisions/pods/plugins/weighers/binpack_test.go
rename to internal/scheduling/pods/plugins/weighers/binpack_test.go
diff --git a/internal/scheduling/decisions/pods/supported_filters.go b/internal/scheduling/pods/supported_filters.go
similarity index 88%
rename from internal/scheduling/decisions/pods/supported_filters.go
rename to internal/scheduling/pods/supported_filters.go
index 9d0eb5d43..4c39652d3 100644
--- a/internal/scheduling/decisions/pods/supported_filters.go
+++ b/internal/scheduling/pods/supported_filters.go
@@ -5,8 +5,8 @@ package pods
import (
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/plugins/filters"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/pods/plugins/filters"
)
type PodFilter = lib.Filter[pods.PodPipelineRequest]
diff --git a/internal/scheduling/decisions/pods/supported_weighers.go b/internal/scheduling/pods/supported_weighers.go
similarity index 83%
rename from internal/scheduling/decisions/pods/supported_weighers.go
rename to internal/scheduling/pods/supported_weighers.go
index ff0449100..7fae5b073 100644
--- a/internal/scheduling/decisions/pods/supported_weighers.go
+++ b/internal/scheduling/pods/supported_weighers.go
@@ -5,8 +5,8 @@ package pods
import (
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
- "github.com/cobaltcore-dev/cortex/internal/scheduling/decisions/pods/plugins/weighers"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/pods/plugins/weighers"
)
type PodWeigher = lib.Weigher[pods.PodPipelineRequest]
From 7cbe704bc9f1f52e70c583765919affe9361dc59 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:24:33 +0100
Subject: [PATCH 36/41] e2e checks into scheduling domains
---
cmd/main.go | 9 +++------
.../{e2e/cinder/checks.go => cinder/e2e_checks.go} | 0
.../{e2e/manila/checks.go => manila/e2e_checks.go} | 0
.../{e2e/nova/checks.go => nova/e2e_checks.go} | 0
4 files changed, 3 insertions(+), 6 deletions(-)
rename internal/scheduling/{e2e/cinder/checks.go => cinder/e2e_checks.go} (100%)
rename internal/scheduling/{e2e/manila/checks.go => manila/e2e_checks.go} (100%)
rename internal/scheduling/{e2e/nova/checks.go => nova/e2e_checks.go} (100%)
diff --git a/cmd/main.go b/cmd/main.go
index 1b00b4e38..0bb89168a 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -39,9 +39,6 @@ import (
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor"
"github.com/cobaltcore-dev/cortex/internal/knowledge/kpis"
"github.com/cobaltcore-dev/cortex/internal/scheduling/cinder"
- cindere2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/cinder"
- manilae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/manila"
- novae2e "github.com/cobaltcore-dev/cortex/internal/scheduling/e2e/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/explanation"
schedulinglib "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
"github.com/cobaltcore-dev/cortex/internal/scheduling/machines"
@@ -89,13 +86,13 @@ func main() {
client := must.Return(client.New(restConfig, copts))
switch os.Args[1] {
case "e2e-nova":
- novae2e.RunChecks(ctx, client, config)
+ nova.RunChecks(ctx, client, config)
return
case "e2e-cinder":
- cindere2e.RunChecks(ctx, client, config)
+ cinder.RunChecks(ctx, client, config)
return
case "e2e-manila":
- manilae2e.RunChecks(ctx, client, config)
+ manila.RunChecks(ctx, client, config)
return
}
}
diff --git a/internal/scheduling/e2e/cinder/checks.go b/internal/scheduling/cinder/e2e_checks.go
similarity index 100%
rename from internal/scheduling/e2e/cinder/checks.go
rename to internal/scheduling/cinder/e2e_checks.go
diff --git a/internal/scheduling/e2e/manila/checks.go b/internal/scheduling/manila/e2e_checks.go
similarity index 100%
rename from internal/scheduling/e2e/manila/checks.go
rename to internal/scheduling/manila/e2e_checks.go
diff --git a/internal/scheduling/e2e/nova/checks.go b/internal/scheduling/nova/e2e_checks.go
similarity index 100%
rename from internal/scheduling/e2e/nova/checks.go
rename to internal/scheduling/nova/e2e_checks.go
From 6fd38d96e85fcc7a35f6bf0eb02c4506079c671a Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 10:41:15 +0100
Subject: [PATCH 37/41] Adjust metric names
---
internal/scheduling/lib/detector_monitor.go | 19 +--
.../scheduling/lib/detector_monitor_test.go | 3 -
.../lib/filter_weigher_pipeline_monitor.go | 18 +--
.../filter_weigher_pipeline_monitor_test.go | 116 +++++++++---------
.../dashboards/cortex-status.json | 12 +-
5 files changed, 78 insertions(+), 90 deletions(-)
diff --git a/internal/scheduling/lib/detector_monitor.go b/internal/scheduling/lib/detector_monitor.go
index 89e5a01a7..cb307d410 100644
--- a/internal/scheduling/lib/detector_monitor.go
+++ b/internal/scheduling/lib/detector_monitor.go
@@ -18,8 +18,6 @@ type DetectorPipelineMonitor struct {
stepDeschedulingCounter *prometheus.GaugeVec
// A histogram to measure how long the pipeline takes to run in total.
pipelineRunTimer prometheus.Histogram
- // A histogram to measure how long it takes to deschedule a VM.
- deschedulingRunTimer *prometheus.HistogramVec
// The name of the pipeline being monitored.
PipelineName string
@@ -28,24 +26,19 @@ type DetectorPipelineMonitor struct {
func NewDetectorPipelineMonitor() DetectorPipelineMonitor {
return DetectorPipelineMonitor{
stepRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_descheduler_pipeline_step_run_duration_seconds",
- Help: "Duration of descheduler pipeline step run",
+ Name: "cortex_detector_pipeline_step_run_duration_seconds",
+ Help: "Duration of detector pipeline step run",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 21), // 0.001s to ~1048s in 21 buckets
}, []string{"step"}),
stepDeschedulingCounter: prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Name: "cortex_descheduler_pipeline_step_vms_descheduled",
- Help: "Number of vms descheduled by a descheduler pipeline step",
+ Name: "cortex_detector_pipeline_step_detections",
+ Help: "Number of resources detected by a detector pipeline step",
}, []string{"step"}),
pipelineRunTimer: prometheus.NewHistogram(prometheus.HistogramOpts{
- Name: "cortex_descheduler_pipeline_run_duration_seconds",
+ Name: "cortex_detector_pipeline_run_duration_seconds",
- Help: "Duration of descheduler pipeline run",
+ Help: "Duration of detector pipeline run",
Buckets: prometheus.DefBuckets,
}),
- deschedulingRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_descheduler_pipeline_vm_descheduling_duration_seconds",
- Help: "Duration of descheduling a VM in the descheduler pipeline",
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 21), // 0.001s to ~1048s in 21 buckets
- }, []string{"error", "skipped", "source_host", "target_host", "vm_id"}),
}
}
@@ -60,14 +53,12 @@ func (m *DetectorPipelineMonitor) Describe(ch chan<- *prometheus.Desc) {
m.stepRunTimer.Describe(ch)
m.stepDeschedulingCounter.Describe(ch)
m.pipelineRunTimer.Describe(ch)
- m.deschedulingRunTimer.Describe(ch)
}
func (m *DetectorPipelineMonitor) Collect(ch chan<- prometheus.Metric) {
m.stepRunTimer.Collect(ch)
m.stepDeschedulingCounter.Collect(ch)
m.pipelineRunTimer.Collect(ch)
- m.deschedulingRunTimer.Collect(ch)
}
type DetectorMonitor[DetectionType Detection] struct {
diff --git a/internal/scheduling/lib/detector_monitor_test.go b/internal/scheduling/lib/detector_monitor_test.go
index 12b987b23..0ebd40775 100644
--- a/internal/scheduling/lib/detector_monitor_test.go
+++ b/internal/scheduling/lib/detector_monitor_test.go
@@ -27,9 +27,6 @@ func TestNewDetectorPipelineMonitor(t *testing.T) {
if monitor.pipelineRunTimer == nil {
t.Error("expected pipelineRunTimer to be initialized")
}
- if monitor.deschedulingRunTimer == nil {
- t.Error("expected deschedulingRunTimer to be initialized")
- }
}
func TestMonitor_Describe(t *testing.T) {
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_monitor.go b/internal/scheduling/lib/filter_weigher_pipeline_monitor.go
index 4b8adf94c..0b55eda94 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_monitor.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_monitor.go
@@ -39,48 +39,48 @@ func NewPipelineMonitor() FilterWeigherPipelineMonitor {
buckets = append(buckets, prometheus.LinearBuckets(10, 10, 4)...)
buckets = append(buckets, prometheus.LinearBuckets(50, 50, 6)...)
stepReorderingsObserver := prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_step_shift_origin",
+ Name: "cortex_filter_weigher_pipeline_step_shift_origin",
Help: "From which index of the subject list the subject came from originally.",
Buckets: buckets,
}, []string{"pipeline", "step", "outidx"})
return FilterWeigherPipelineMonitor{
stepRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_step_run_duration_seconds",
+ Name: "cortex_filter_weigher_pipeline_step_run_duration_seconds",
Help: "Duration of scheduler pipeline step run",
Buckets: prometheus.DefBuckets,
}, []string{"pipeline", "step"}),
stepSubjectWeight: prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Name: "cortex_scheduler_pipeline_step_weight_modification",
+ Name: "cortex_filter_weigher_pipeline_step_weight_modification",
Help: "Modification of subject weight by scheduler pipeline step",
}, []string{"pipeline", "subject", "step"}),
stepRemovedSubjectsObserver: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_step_removed_subjects",
+ Name: "cortex_filter_weigher_pipeline_step_removed_subjects",
Help: "Number of subjects removed by scheduler pipeline step",
Buckets: prometheus.ExponentialBucketsRange(1, 1000, 10),
}, []string{"pipeline", "step"}),
stepReorderingsObserver: stepReorderingsObserver,
stepImpactObserver: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_step_impact",
+ Name: "cortex_filter_weigher_pipeline_step_impact",
Help: "Impact of the step on the subjects",
Buckets: prometheus.ExponentialBucketsRange(0.01, 1000, 20),
}, []string{"pipeline", "step", "stat", "unit"}),
pipelineRunTimer: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_run_duration_seconds",
+ Name: "cortex_filter_weigher_pipeline_run_duration_seconds",
Help: "Duration of scheduler pipeline run",
Buckets: prometheus.DefBuckets,
}, []string{"pipeline"}),
subjectNumberInObserver: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_subject_number_in",
+ Name: "cortex_filter_weigher_pipeline_subject_number_in",
Help: "Number of subjects going into the scheduler pipeline",
Buckets: prometheus.ExponentialBucketsRange(1, 1000, 10),
}, []string{"pipeline"}),
subjectNumberOutObserver: prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_scheduler_pipeline_subject_number_out",
+ Name: "cortex_filter_weigher_pipeline_subject_number_out",
Help: "Number of subjects coming out of the scheduler pipeline",
Buckets: prometheus.ExponentialBucketsRange(1, 1000, 10),
}, []string{"pipeline"}),
requestCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
- Name: "cortex_scheduler_pipeline_requests_total",
+ Name: "cortex_filter_weigher_pipeline_requests_total",
Help: "Total number of requests processed by the scheduler.",
}, []string{"pipeline"}),
}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go b/internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go
index a0c009293..023a23ae1 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_monitor_test.go
@@ -19,98 +19,98 @@ func TestSchedulerMonitor(t *testing.T) {
// Test stepRunTimer
expectedStepRunTimer := strings.NewReader(`
- # HELP cortex_scheduler_pipeline_step_run_duration_seconds Duration of scheduler pipeline step run
- # TYPE cortex_scheduler_pipeline_step_run_duration_seconds histogram
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.005"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.01"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.025"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.05"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.1"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.25"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.5"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="1"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="2.5"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="5"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="10"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="+Inf"} 1
- cortex_scheduler_pipeline_step_run_duration_seconds_sum{pipeline="test",step="test_step"} 0
- cortex_scheduler_pipeline_step_run_duration_seconds_count{pipeline="test",step="test_step"} 1
+ # HELP cortex_filter_weigher_pipeline_step_run_duration_seconds Duration of scheduler pipeline step run
+ # TYPE cortex_filter_weigher_pipeline_step_run_duration_seconds histogram
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.005"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.01"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.025"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.05"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.1"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.25"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="0.5"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="1"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="2.5"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="5"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="10"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket{pipeline="test",step="test_step",le="+Inf"} 1
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_sum{pipeline="test",step="test_step"} 0
+ cortex_filter_weigher_pipeline_step_run_duration_seconds_count{pipeline="test",step="test_step"} 1
`)
monitor.stepRunTimer.WithLabelValues("test", "test_step").Observe(0)
- err := testutil.GatherAndCompare(registry, expectedStepRunTimer, "cortex_scheduler_pipeline_step_run_duration_seconds")
+ err := testutil.GatherAndCompare(registry, expectedStepRunTimer, "cortex_filter_weigher_pipeline_step_run_duration_seconds")
if err != nil {
t.Fatalf("stepRunTimer test failed: %v", err)
}
// Test stepSubjectWeight
expectedStepSubjectWeight := strings.NewReader(`
- # HELP cortex_scheduler_pipeline_step_weight_modification Modification of subject weight by scheduler pipeline step
- # TYPE cortex_scheduler_pipeline_step_weight_modification gauge
- cortex_scheduler_pipeline_step_weight_modification{pipeline="test",step="test_step",subject="test_subject"} 42
+ # HELP cortex_filter_weigher_pipeline_step_weight_modification Modification of subject weight by scheduler pipeline step
+ # TYPE cortex_filter_weigher_pipeline_step_weight_modification gauge
+ cortex_filter_weigher_pipeline_step_weight_modification{pipeline="test",step="test_step",subject="test_subject"} 42
`)
monitor.stepSubjectWeight.WithLabelValues("test", "test_subject", "test_step").Set(42)
- err = testutil.GatherAndCompare(registry, expectedStepSubjectWeight, "cortex_scheduler_pipeline_step_weight_modification")
+ err = testutil.GatherAndCompare(registry, expectedStepSubjectWeight, "cortex_filter_weigher_pipeline_step_weight_modification")
if err != nil {
t.Fatalf("stepSubjectWeight test failed: %v", err)
}
// Test stepRemovedSubjectsObserver
expectedRemovedSubjectsObserver := strings.NewReader(`
- # HELP cortex_scheduler_pipeline_step_removed_subjects Number of subjects removed by scheduler pipeline step
- # TYPE cortex_scheduler_pipeline_step_removed_subjects histogram
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="1"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="2.154434690031884"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="4.641588833612779"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="10.000000000000002"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="21.544346900318843"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="46.4158883361278"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="100.00000000000003"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="215.44346900318845"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="464.15888336127813"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="1000.0000000000006"} 1
- cortex_scheduler_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="+Inf"} 1
- cortex_scheduler_pipeline_step_removed_subjects_sum{pipeline="test",step="test_step"} 1
- cortex_scheduler_pipeline_step_removed_subjects_count{pipeline="test",step="test_step"} 1
+ # HELP cortex_filter_weigher_pipeline_step_removed_subjects Number of subjects removed by scheduler pipeline step
+ # TYPE cortex_filter_weigher_pipeline_step_removed_subjects histogram
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="1"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="2.154434690031884"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="4.641588833612779"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="10.000000000000002"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="21.544346900318843"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="46.4158883361278"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="100.00000000000003"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="215.44346900318845"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="464.15888336127813"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="1000.0000000000006"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_bucket{pipeline="test",step="test_step",le="+Inf"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_sum{pipeline="test",step="test_step"} 1
+ cortex_filter_weigher_pipeline_step_removed_subjects_count{pipeline="test",step="test_step"} 1
`)
monitor.stepRemovedSubjectsObserver.WithLabelValues("test", "test_step").Observe(1)
- err = testutil.GatherAndCompare(registry, expectedRemovedSubjectsObserver, "cortex_scheduler_pipeline_step_removed_subjects")
+ err = testutil.GatherAndCompare(registry, expectedRemovedSubjectsObserver, "cortex_filter_weigher_pipeline_step_removed_subjects")
if err != nil {
t.Fatalf("stepRemovedSubjectsObserver test failed: %v", err)
}
// Test pipelineRunTimer
expectedPipelineRunTimer := strings.NewReader(`
- # HELP cortex_scheduler_pipeline_run_duration_seconds Duration of scheduler pipeline run
- # TYPE cortex_scheduler_pipeline_run_duration_seconds histogram
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.005"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.01"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.025"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.05"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.1"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.25"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.5"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="1"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="2.5"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="5"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="10"} 1
- cortex_scheduler_pipeline_run_duration_seconds_bucket{pipeline="test",le="+Inf"} 1
- cortex_scheduler_pipeline_run_duration_seconds_sum{pipeline="test"} 0
- cortex_scheduler_pipeline_run_duration_seconds_count{pipeline="test"} 1
+ # HELP cortex_filter_weigher_pipeline_run_duration_seconds Duration of scheduler pipeline run
+ # TYPE cortex_filter_weigher_pipeline_run_duration_seconds histogram
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.005"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.01"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.025"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.05"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.1"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.25"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="0.5"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="1"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="2.5"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="5"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="10"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_bucket{pipeline="test",le="+Inf"} 1
+ cortex_filter_weigher_pipeline_run_duration_seconds_sum{pipeline="test"} 0
+ cortex_filter_weigher_pipeline_run_duration_seconds_count{pipeline="test"} 1
`)
monitor.pipelineRunTimer.WithLabelValues("test").Observe(0)
- err = testutil.GatherAndCompare(registry, expectedPipelineRunTimer, "cortex_scheduler_pipeline_run_duration_seconds")
+ err = testutil.GatherAndCompare(registry, expectedPipelineRunTimer, "cortex_filter_weigher_pipeline_run_duration_seconds")
if err != nil {
t.Fatalf("pipelineRunTimer test failed: %v", err)
}
// Test requestCounter
expectedRequestCounter := strings.NewReader(`
- # HELP cortex_scheduler_pipeline_requests_total Total number of requests processed by the scheduler.
- # TYPE cortex_scheduler_pipeline_requests_total counter
- cortex_scheduler_pipeline_requests_total{pipeline="test"} 3
+ # HELP cortex_filter_weigher_pipeline_requests_total Total number of requests processed by the scheduler.
+ # TYPE cortex_filter_weigher_pipeline_requests_total counter
+ cortex_filter_weigher_pipeline_requests_total{pipeline="test"} 3
`)
monitor.requestCounter.WithLabelValues("test").Add(3)
- err = testutil.GatherAndCompare(registry, expectedRequestCounter, "cortex_scheduler_pipeline_requests_total")
+ err = testutil.GatherAndCompare(registry, expectedRequestCounter, "cortex_filter_weigher_pipeline_requests_total")
if err != nil {
t.Fatalf("requestCounter test failed: %v", err)
}
diff --git a/tools/plutono/provisioning/dashboards/cortex-status.json b/tools/plutono/provisioning/dashboards/cortex-status.json
index fee633efc..efb4b4588 100644
--- a/tools/plutono/provisioning/dashboards/cortex-status.json
+++ b/tools/plutono/provisioning/dashboards/cortex-status.json
@@ -571,7 +571,7 @@
"targets": [
{
"exemplar": false,
- "expr": "sum(delta(cortex_scheduler_pipeline_step_shift_origin_bucket{outidx=\"0\",pipeline=~\"nova-external-scheduler-.*\"}[2m]) / 2) by (le)",
+ "expr": "sum(delta(cortex_filter_weigher_pipeline_step_shift_origin_bucket{outidx=\"0\",pipeline=~\"nova-external-scheduler-.*\"}[2m]) / 2) by (le)",
"format": "heatmap",
"instant": false,
"interval": "",
@@ -647,7 +647,7 @@
"targets": [
{
"exemplar": false,
- "expr": "sum(delta(cortex_scheduler_pipeline_step_shift_origin_bucket{outidx=\"0\",pipeline=\"manila-external-scheduler\"}[2m]) / 2) by (le)",
+ "expr": "sum(delta(cortex_filter_weigher_pipeline_step_shift_origin_bucket{outidx=\"0\",pipeline=\"manila-external-scheduler\"}[2m]) / 2) by (le)",
"format": "heatmap",
"instant": false,
"interval": "",
@@ -733,7 +733,7 @@
"targets": [
{
"exemplar": true,
- "expr": "sum by (pipeline, subject, step, alias) (delta(cortex_scheduler_pipeline_step_weight_modification[2m]))",
+ "expr": "sum by (pipeline, subject, step, alias) (delta(cortex_filter_weigher_pipeline_step_weight_modification[2m]))",
"format": "time_series",
"instant": false,
"interval": "",
@@ -1265,7 +1265,7 @@
"targets": [
{
"exemplar": true,
- "expr": "histogram_quantile(0.95, sum(rate(cortex_scheduler_pipeline_step_run_duration_seconds_bucket[2m])) by (le, step, alias, pipeline))",
+ "expr": "histogram_quantile(0.95, sum(rate(cortex_filter_weigher_pipeline_step_run_duration_seconds_bucket[2m])) by (le, step, alias, pipeline))",
"interval": "",
"legendFormat": "{{pipeline}} {{step}}",
"refId": "A"
@@ -1372,7 +1372,7 @@
"targets": [
{
"exemplar": true,
- "expr": "sum by(pipeline) (rate(cortex_scheduler_pipeline_requests_total{}[2m]))",
+ "expr": "sum by(pipeline) (rate(cortex_filter_weigher_pipeline_requests_total{}[2m]))",
"interval": "",
"legendFormat": "Pipeline: {{pipeline}}",
"refId": "A"
@@ -2222,7 +2222,7 @@
"targets": [
{
"exemplar": true,
- "expr": "histogram_quantile(0.95, sum(rate(cortex_descheduler_pipeline_step_run_duration_seconds_bucket[2m])) by (le, step))",
+ "expr": "histogram_quantile(0.95, sum(rate(cortex_detector_pipeline_step_run_duration_seconds_bucket[2m])) by (le, step))",
"interval": "",
"legendFormat": "{{step}}",
"refId": "A"
From e6573590df6f065211a4462ad4334cc327d710c2 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 11:22:19 +0100
Subject: [PATCH 38/41] Improve coverage in scheduling/nova
---
.../nova/detector_cycle_breaker_test.go | 75 +++++++
.../nova/external_scheduler_api_test.go | 205 ++++++++++++++++++
.../detectors/avoid_high_steal_pct_test.go | 99 +++++++++
.../nova/plugins/vm_detection_test.go | 196 +++++++++++++++++
...mware_anti_affinity_noisy_projects_test.go | 166 +++++++++++++-
...re_avoid_long_term_contended_hosts_test.go | 151 ++++++++++++-
...e_avoid_short_term_contended_hosts_test.go | 151 ++++++++++++-
.../vmware_general_purpose_balancing_test.go | 97 ++++++++-
.../weighers/vmware_hana_binpacking_test.go | 97 ++++++++-
9 files changed, 1225 insertions(+), 12 deletions(-)
create mode 100644 internal/scheduling/nova/plugins/vm_detection_test.go
diff --git a/internal/scheduling/nova/detector_cycle_breaker_test.go b/internal/scheduling/nova/detector_cycle_breaker_test.go
index 50745cdd0..a242ff0d0 100644
--- a/internal/scheduling/nova/detector_cycle_breaker_test.go
+++ b/internal/scheduling/nova/detector_cycle_breaker_test.go
@@ -11,6 +11,7 @@ import (
"github.com/cobaltcore-dev/cortex/internal/scheduling/nova/plugins"
"github.com/cobaltcore-dev/cortex/pkg/conf"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
type mockDetectorCycleBreakerNovaAPI struct {
@@ -245,3 +246,77 @@ func TestDetectorCycleBreaker_Filter_EmptyVMDetections(t *testing.T) {
t.Errorf("expected empty result for empty input, got %d decisions", len(result))
}
}
+
+func TestNewDetectorCycleBreaker(t *testing.T) {
+ detector := NewDetectorCycleBreaker()
+
+ if detector == nil {
+ t.Fatal("expected non-nil detector")
+ }
+
+ // Verify it's the correct type
+ _, ok := detector.(*detectorCycleBreaker)
+ if !ok {
+ t.Errorf("expected *detectorCycleBreaker, got %T", detector)
+ }
+
+ // Verify the novaAPI field is initialized
+ detectorImpl := detector.(*detectorCycleBreaker)
+ if detectorImpl.novaAPI == nil {
+ t.Error("expected novaAPI to be initialized")
+ }
+}
+
+func TestDetectorCycleBreaker_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ setupMock func() NovaAPI
+ expectErr bool
+ }{
+ {
+ name: "successful initialization",
+ setupMock: func() NovaAPI {
+ return &mockDetectorCycleBreakerNovaAPI{}
+ },
+ expectErr: false,
+ },
+ {
+ name: "initialization with error",
+ setupMock: func() NovaAPI {
+ return &mockDetectorCycleBreakerNovaAPIWithInitError{}
+ },
+ expectErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ detector := &detectorCycleBreaker{
+ novaAPI: tt.setupMock(),
+ }
+
+ ctx := context.Background()
+ fakeClient := fake.NewClientBuilder().Build()
+ cfg := conf.Config{}
+
+ err := detector.Init(ctx, fakeClient, cfg)
+
+ if tt.expectErr && err == nil {
+ t.Error("expected error but got none")
+ }
+
+ if !tt.expectErr && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ })
+ }
+}
+
+// mockDetectorCycleBreakerNovaAPIWithInitError is a mock that returns an error on Init
+type mockDetectorCycleBreakerNovaAPIWithInitError struct {
+ mockDetectorCycleBreakerNovaAPI
+}
+
+func (m *mockDetectorCycleBreakerNovaAPIWithInitError) Init(ctx context.Context, client client.Client, conf conf.Config) error {
+ return errors.New("init error")
+}
diff --git a/internal/scheduling/nova/external_scheduler_api_test.go b/internal/scheduling/nova/external_scheduler_api_test.go
index 0f06a47d4..6ac7706ed 100644
--- a/internal/scheduling/nova/external_scheduler_api_test.go
+++ b/internal/scheduling/nova/external_scheduler_api_test.go
@@ -393,3 +393,208 @@ func TestHTTPAPI_NovaExternalScheduler_DecisionCreation(t *testing.T) {
t.Error("NovaRaw should not be nil")
}
}
+
+func TestHTTPAPI_inferPipelineName(t *testing.T) {
+ config := conf.Config{SchedulingDomain: "test-operator"}
+ delegate := &mockHTTPAPIDelegate{}
+ api := NewAPI(config, delegate).(*httpAPI)
+
+ tests := []struct {
+ name string
+ requestData novaapi.ExternalSchedulerRequest
+ expectedResult string
+ expectErr bool
+ errContains string
+ }{
+ {
+ name: "qemu hypervisor without reservation",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "qemu",
+ },
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectedResult: "nova-external-scheduler-kvm",
+ expectErr: false,
+ },
+ {
+ name: "qemu hypervisor with reservation",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "qemu",
+ },
+ },
+ },
+ },
+ },
+ Reservation: true,
+ },
+ expectedResult: "nova-external-scheduler-kvm-all-filters-enabled",
+ expectErr: false,
+ },
+ {
+ name: "QEMU hypervisor uppercase",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "QEMU",
+ },
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectedResult: "nova-external-scheduler-kvm",
+ expectErr: false,
+ },
+ {
+ name: "ch hypervisor without reservation",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "ch",
+ },
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectedResult: "nova-external-scheduler-kvm",
+ expectErr: false,
+ },
+ {
+ name: "ch hypervisor with reservation",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "ch",
+ },
+ },
+ },
+ },
+ },
+ Reservation: true,
+ },
+ expectedResult: "nova-external-scheduler-kvm-all-filters-enabled",
+ expectErr: false,
+ },
+ {
+ name: "vmware hypervisor without reservation",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "VMware vCenter Server",
+ },
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectedResult: "nova-external-scheduler-vmware",
+ expectErr: false,
+ },
+ {
+ name: "vmware hypervisor with reservation - error",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "VMware vCenter Server",
+ },
+ },
+ },
+ },
+ },
+ Reservation: true,
+ },
+ expectErr: true,
+ errContains: "reservations are not supported on vmware hypervisors",
+ },
+ {
+ name: "missing hypervisor_type",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{},
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectErr: true,
+ errContains: "missing hypervisor_type",
+ },
+ {
+ name: "unsupported hypervisor_type",
+ requestData: novaapi.ExternalSchedulerRequest{
+ Spec: novaapi.NovaObject[novaapi.NovaSpec]{
+ Data: novaapi.NovaSpec{
+ Flavor: novaapi.NovaObject[novaapi.NovaFlavor]{
+ Data: novaapi.NovaFlavor{
+ ExtraSpecs: map[string]string{
+ "capabilities:hypervisor_type": "unknown-hypervisor",
+ },
+ },
+ },
+ },
+ },
+ Reservation: false,
+ },
+ expectErr: true,
+ errContains: "unsupported hypervisor_type",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := api.inferPipelineName(tt.requestData)
+
+ if tt.expectErr {
+ if err == nil {
+ t.Error("expected error but got none")
+ } else if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
+ t.Errorf("expected error to contain '%s', got '%s'", tt.errContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if result != tt.expectedResult {
+ t.Errorf("expected pipeline name '%s', got '%s'", tt.expectedResult, result)
+ }
+ }
+ })
+ }
+}
diff --git a/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
index 1e89f875a..30ec0d670 100644
--- a/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
+++ b/internal/scheduling/nova/plugins/detectors/avoid_high_steal_pct_test.go
@@ -4,11 +4,14 @@
package detectors
import (
+ "context"
+ "strings"
"testing"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -19,6 +22,102 @@ type VMDetection struct {
Host string
}
+func TestAvoidHighStealPctStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{"maxStealPctOverObservedTimeSpan": 80.0}`),
+ }
+
+ tests := []struct {
+ name string
+ knowledge *v1alpha1.Knowledge
+ detectorSpec v1alpha1.DetectorSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledge",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "kvm-libvirt-domain-cpu-steal-pct"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ RawLength: 10,
+ },
+ },
+ detectorSpec: v1alpha1.DetectorSpec{
+ Name: "avoid_high_steal_pct",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when knowledge doesn't exist",
+ knowledge: nil,
+ detectorSpec: v1alpha1.DetectorSpec{
+ Name: "avoid_high_steal_pct",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ {
+ name: "fails when knowledge not ready",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "kvm-libvirt-domain-cpu-steal-pct"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionFalse,
+ },
+ },
+ RawLength: 0,
+ },
+ },
+ detectorSpec: v1alpha1.DetectorSpec{
+ Name: "avoid_high_steal_pct",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "not ready",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ if tt.knowledge != nil {
+ builder = builder.WithObjects(tt.knowledge)
+ }
+ client := builder.Build()
+
+ step := &AvoidHighStealPctStep{}
+ err := step.Init(context.Background(), client, tt.detectorSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
func TestAvoidHighStealPctStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
diff --git a/internal/scheduling/nova/plugins/vm_detection_test.go b/internal/scheduling/nova/plugins/vm_detection_test.go
new file mode 100644
index 000000000..cc8d7076e
--- /dev/null
+++ b/internal/scheduling/nova/plugins/vm_detection_test.go
@@ -0,0 +1,196 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package plugins
+
+import (
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+)
+
+func TestVMDetection_GetResource(t *testing.T) {
+ tests := []struct {
+ name string
+ vmID string
+ expected string
+ }{
+ {
+ name: "returns VM ID",
+ vmID: "vm-123",
+ expected: "vm-123",
+ },
+ {
+ name: "returns empty string when VM ID is empty",
+ vmID: "",
+ expected: "",
+ },
+ {
+ name: "returns UUID format VM ID",
+ vmID: "550e8400-e29b-41d4-a716-446655440000",
+ expected: "550e8400-e29b-41d4-a716-446655440000",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ d := VMDetection{VMID: tt.vmID}
+ if got := d.GetResource(); got != tt.expected {
+ t.Errorf("GetResource() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
+
+func TestVMDetection_GetReason(t *testing.T) {
+ tests := []struct {
+ name string
+ reason string
+ expected string
+ }{
+ {
+ name: "returns reason",
+ reason: "high CPU usage",
+ expected: "high CPU usage",
+ },
+ {
+ name: "returns empty string when reason is empty",
+ reason: "",
+ expected: "",
+ },
+ {
+ name: "returns detailed reason",
+ reason: "kvm monitoring indicates cpu steal pct 85.50% which is above 80.00% threshold",
+ expected: "kvm monitoring indicates cpu steal pct 85.50% which is above 80.00% threshold",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ d := VMDetection{Reason: tt.reason}
+ if got := d.GetReason(); got != tt.expected {
+ t.Errorf("GetReason() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
+
+func TestVMDetection_GetHost(t *testing.T) {
+ tests := []struct {
+ name string
+ host string
+ expected string
+ }{
+ {
+ name: "returns host",
+ host: "compute-host-1",
+ expected: "compute-host-1",
+ },
+ {
+ name: "returns empty string when host is empty",
+ host: "",
+ expected: "",
+ },
+ {
+ name: "returns FQDN host",
+ host: "compute-host-1.example.com",
+ expected: "compute-host-1.example.com",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ d := VMDetection{Host: tt.host}
+ if got := d.GetHost(); got != tt.expected {
+ t.Errorf("GetHost() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
+
+func TestVMDetection_WithReason(t *testing.T) {
+ tests := []struct {
+ name string
+ initialReason string
+ newReason string
+ expectedVMID string
+ expectedHost string
+ }{
+ {
+ name: "sets new reason",
+ initialReason: "old reason",
+ newReason: "new reason",
+ expectedVMID: "vm-123",
+ expectedHost: "host-1",
+ },
+ {
+ name: "sets reason from empty",
+ initialReason: "",
+ newReason: "new reason",
+ expectedVMID: "vm-456",
+ expectedHost: "host-2",
+ },
+ {
+ name: "clears reason",
+ initialReason: "existing reason",
+ newReason: "",
+ expectedVMID: "vm-789",
+ expectedHost: "host-3",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ d := VMDetection{
+ VMID: tt.expectedVMID,
+ Reason: tt.initialReason,
+ Host: tt.expectedHost,
+ }
+
+ result := d.WithReason(tt.newReason)
+
+ // Check that the reason was updated
+ if got := result.GetReason(); got != tt.newReason {
+ t.Errorf("WithReason() reason = %v, want %v", got, tt.newReason)
+ }
+
+ // Check that VMID is preserved
+ if got := result.GetResource(); got != tt.expectedVMID {
+ t.Errorf("WithReason() preserved VMID = %v, want %v", got, tt.expectedVMID)
+ }
+
+ // Check that Host is preserved
+ if got := result.GetHost(); got != tt.expectedHost {
+ t.Errorf("WithReason() preserved Host = %v, want %v", got, tt.expectedHost)
+ }
+ })
+ }
+}
+
+func TestVMDetection_ImplementsDetectionInterface(t *testing.T) {
+ // Verify that VMDetection implements the lib.Detection interface
+ var _ lib.Detection = VMDetection{}
+ var _ lib.Detection = &VMDetection{}
+
+ d := VMDetection{
+ VMID: "test-vm",
+ Reason: "test reason",
+ Host: "test-host",
+ }
+
+ // Verify interface methods work correctly
+ if d.GetResource() != "test-vm" {
+ t.Error("GetResource() interface method not working")
+ }
+ if d.GetReason() != "test reason" {
+ t.Error("GetReason() interface method not working")
+ }
+ if d.GetHost() != "test-host" {
+ t.Error("GetHost() interface method not working")
+ }
+
+ updated := d.WithReason("updated reason")
+ if updated.GetReason() != "updated reason" {
+ t.Error("WithReason() interface method not working")
+ }
+}
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go b/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
index af653fd7a..bce66198d 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_anti_affinity_noisy_projects_test.go
@@ -4,16 +4,178 @@
package weighers
import (
+ "context"
"log/slog"
+ "strings"
"testing"
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestVMwareAntiAffinityNoisyProjectsStepOpts_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ opts VMwareAntiAffinityNoisyProjectsStepOpts
+ wantError bool
+ }{
+ {
+ name: "valid opts with different bounds",
+ opts: VMwareAntiAffinityNoisyProjectsStepOpts{
+ AvgCPUUsageLowerBound: 20.0,
+ AvgCPUUsageUpperBound: 100.0,
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -0.5,
+ },
+ wantError: false,
+ },
+ {
+ name: "invalid opts - equal bounds causes zero division",
+ opts: VMwareAntiAffinityNoisyProjectsStepOpts{
+ AvgCPUUsageLowerBound: 50.0,
+ AvgCPUUsageUpperBound: 50.0, // Same as lower bound
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -0.5,
+ },
+ wantError: true,
+ },
+ {
+ name: "valid opts with zero bounds",
+ opts: VMwareAntiAffinityNoisyProjectsStepOpts{
+ AvgCPUUsageLowerBound: 0.0,
+ AvgCPUUsageUpperBound: 100.0,
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: 1.0,
+ },
+ wantError: false,
+ },
+ {
+ name: "valid opts with negative values",
+ opts: VMwareAntiAffinityNoisyProjectsStepOpts{
+ AvgCPUUsageLowerBound: -10.0,
+ AvgCPUUsageUpperBound: 10.0,
+ AvgCPUUsageActivationLowerBound: -1.0,
+ AvgCPUUsageActivationUpperBound: 1.0,
+ },
+ wantError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.opts.Validate()
+ if (err != nil) != tt.wantError {
+ t.Errorf("Validate() error = %v, wantError %v", err, tt.wantError)
+ }
+ })
+ }
+}
+
+func TestVMwareAntiAffinityNoisyProjectsStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Valid params JSON for the weigher
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{"avgCPUUsageLowerBound": 20.0, "avgCPUUsageUpperBound": 100.0, "avgCPUUsageActivationLowerBound": 0.0, "avgCPUUsageActivationUpperBound": -0.5}`),
+ }
+
+ tests := []struct {
+ name string
+ knowledge *v1alpha1.Knowledge
+ weigherSpec v1alpha1.WeigherSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledge",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-project-noisiness"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ RawLength: 10,
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_anti_affinity_noisy_projects",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when knowledge doesn't exist",
+ knowledge: nil,
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_anti_affinity_noisy_projects",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ {
+ name: "fails when knowledge not ready",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-project-noisiness"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionFalse,
+ },
+ },
+ RawLength: 0,
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_anti_affinity_noisy_projects",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "not ready",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ if tt.knowledge != nil {
+ builder = builder.WithObjects(tt.knowledge)
+ }
+ client := builder.Build()
+
+ step := &VMwareAntiAffinityNoisyProjectsStep{}
+ err := step.Init(context.Background(), client, tt.weigherSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !containsString(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
// containsString reports whether substr is within s. It is a trivial
// wrapper around strings.Contains; prefer calling strings.Contains
// directly in new code.
func containsString(s, substr string) bool {
	return strings.Contains(s, substr)
}
+
func TestVMwareAntiAffinityNoisyProjectsStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
@@ -37,7 +199,7 @@ func TestVMwareAntiAffinityNoisyProjectsStep_Run(t *testing.T) {
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "vmware-project-noisiness"},
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-project-noisiness"},
Status: v1alpha1.KnowledgeStatus{Raw: vropsProjectNoisiness},
}).
Build()
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
index 72a7378a4..052d39a1f 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_avoid_long_term_contended_hosts_test.go
@@ -4,16 +4,163 @@
package weighers
import (
+ "context"
"log/slog"
+ "strings"
"testing"
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestVMwareAvoidLongTermContendedHostsStepOpts_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ opts VMwareAvoidLongTermContendedHostsStepOpts
+ wantError bool
+ }{
+ {
+ name: "valid opts with different bounds",
+ opts: VMwareAvoidLongTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 0.0,
+ AvgCPUContentionUpperBound: 100.0,
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 0.0,
+ MaxCPUContentionUpperBound: 100.0,
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: false,
+ },
+ {
+ name: "invalid opts - equal avg bounds",
+ opts: VMwareAvoidLongTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 50.0,
+ AvgCPUContentionUpperBound: 50.0, // Same as lower
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 0.0,
+ MaxCPUContentionUpperBound: 100.0,
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: true,
+ },
+ {
+ name: "invalid opts - equal max bounds",
+ opts: VMwareAvoidLongTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 0.0,
+ AvgCPUContentionUpperBound: 100.0,
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 50.0,
+ MaxCPUContentionUpperBound: 50.0, // Same as lower
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.opts.Validate()
+ if (err != nil) != tt.wantError {
+ t.Errorf("Validate() error = %v, wantError %v", err, tt.wantError)
+ }
+ })
+ }
+}
+
+func TestVMwareAvoidLongTermContendedHostsStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{
+ "avgCPUContentionLowerBound": 0,
+ "avgCPUContentionUpperBound": 100,
+ "avgCPUContentionActivationLowerBound": 0,
+ "avgCPUContentionActivationUpperBound": -1,
+ "maxCPUContentionLowerBound": 0,
+ "maxCPUContentionUpperBound": 100,
+ "maxCPUContentionActivationLowerBound": 0,
+ "maxCPUContentionActivationUpperBound": -1
+ }`),
+ }
+
+ tests := []struct {
+ name string
+ knowledge *v1alpha1.Knowledge
+ weigherSpec v1alpha1.WeigherSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledge",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-long-term-contended-hosts"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ RawLength: 10,
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_avoid_long_term_contended_hosts",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when knowledge doesn't exist",
+ knowledge: nil,
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_avoid_long_term_contended_hosts",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ if tt.knowledge != nil {
+ builder = builder.WithObjects(tt.knowledge)
+ }
+ client := builder.Build()
+
+ step := &VMwareAvoidLongTermContendedHostsStep{}
+ err := step.Init(context.Background(), client, tt.weigherSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
func TestVMwareAvoidLongTermContendedHostsStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
@@ -43,7 +190,7 @@ func TestVMwareAvoidLongTermContendedHostsStep_Run(t *testing.T) {
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "vmware-long-term-contended-hosts"},
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-long-term-contended-hosts"},
Status: v1alpha1.KnowledgeStatus{Raw: vropsHostsystemContentionLongTerm},
}).
Build()
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go b/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
index 25cbded43..13bd1cbd6 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_avoid_short_term_contended_hosts_test.go
@@ -4,16 +4,163 @@
package weighers
import (
+ "context"
"log/slog"
+ "strings"
"testing"
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestVMwareAvoidShortTermContendedHostsStepOpts_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ opts VMwareAvoidShortTermContendedHostsStepOpts
+ wantError bool
+ }{
+ {
+ name: "valid opts with different bounds",
+ opts: VMwareAvoidShortTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 0.0,
+ AvgCPUContentionUpperBound: 100.0,
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 0.0,
+ MaxCPUContentionUpperBound: 100.0,
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: false,
+ },
+ {
+ name: "invalid opts - equal avg bounds",
+ opts: VMwareAvoidShortTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 50.0,
+ AvgCPUContentionUpperBound: 50.0, // Same as lower
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 0.0,
+ MaxCPUContentionUpperBound: 100.0,
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: true,
+ },
+ {
+ name: "invalid opts - equal max bounds",
+ opts: VMwareAvoidShortTermContendedHostsStepOpts{
+ AvgCPUContentionLowerBound: 0.0,
+ AvgCPUContentionUpperBound: 100.0,
+ AvgCPUContentionActivationLowerBound: 0.0,
+ AvgCPUContentionActivationUpperBound: -1.0,
+ MaxCPUContentionLowerBound: 50.0,
+ MaxCPUContentionUpperBound: 50.0, // Same as lower
+ MaxCPUContentionActivationLowerBound: 0.0,
+ MaxCPUContentionActivationUpperBound: -1.0,
+ },
+ wantError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.opts.Validate()
+ if (err != nil) != tt.wantError {
+ t.Errorf("Validate() error = %v, wantError %v", err, tt.wantError)
+ }
+ })
+ }
+}
+
+func TestVMwareAvoidShortTermContendedHostsStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{
+ "avgCPUContentionLowerBound": 0,
+ "avgCPUContentionUpperBound": 100,
+ "avgCPUContentionActivationLowerBound": 0,
+ "avgCPUContentionActivationUpperBound": -1,
+ "maxCPUContentionLowerBound": 0,
+ "maxCPUContentionUpperBound": 100,
+ "maxCPUContentionActivationLowerBound": 0,
+ "maxCPUContentionActivationUpperBound": -1
+ }`),
+ }
+
+ tests := []struct {
+ name string
+ knowledge *v1alpha1.Knowledge
+ weigherSpec v1alpha1.WeigherSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledge",
+ knowledge: &v1alpha1.Knowledge{
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-short-term-contended-hosts"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ RawLength: 10,
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_avoid_short_term_contended_hosts",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when knowledge doesn't exist",
+ knowledge: nil,
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_avoid_short_term_contended_hosts",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ if tt.knowledge != nil {
+ builder = builder.WithObjects(tt.knowledge)
+ }
+ client := builder.Build()
+
+ step := &VMwareAvoidShortTermContendedHostsStep{}
+ err := step.Init(context.Background(), client, tt.weigherSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
func TestVMwareAvoidShortTermContendedHostsStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
@@ -43,7 +190,7 @@ func TestVMwareAvoidShortTermContendedHostsStep_Run(t *testing.T) {
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "vmware-short-term-contended-hosts"},
+ ObjectMeta: metav1.ObjectMeta{Name: "vmware-short-term-contended-hosts"},
Status: v1alpha1.KnowledgeStatus{Raw: vropsHostsystemContentionShortTerm},
}).
Build()
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go b/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
index 446a0ec9e..6a33c2762 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_general_purpose_balancing_test.go
@@ -4,13 +4,16 @@
package weighers
import (
+ "context"
"log/slog"
+ "strings"
"testing"
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -48,6 +51,94 @@ func TestVMwareGeneralPurposeBalancingStepOpts_Validate(t *testing.T) {
}
}
+func TestVMwareGeneralPurposeBalancingStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{
+ "ramUtilizedLowerBoundPct": 20.0,
+ "ramUtilizedUpperBoundPct": 80.0,
+ "ramUtilizedActivationLowerBound": 0.0,
+ "ramUtilizedActivationUpperBound": 1.0
+ }`),
+ }
+
+ tests := []struct {
+ name string
+ knowledges []*v1alpha1.Knowledge
+ weigherSpec v1alpha1.WeigherSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledges",
+ knowledges: []*v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {Type: v1alpha1.KnowledgeConditionReady, Status: metav1.ConditionTrue},
+ },
+ RawLength: 10,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {Type: v1alpha1.KnowledgeConditionReady, Status: metav1.ConditionTrue},
+ },
+ RawLength: 10,
+ },
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_general_purpose_balancing",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when host-utilization knowledge doesn't exist",
+ knowledges: nil,
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_general_purpose_balancing",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ for _, k := range tt.knowledges {
+ builder = builder.WithObjects(k)
+ }
+ client := builder.Build()
+
+ step := &VMwareGeneralPurposeBalancingStep{}
+ err := step.Init(context.Background(), client, tt.weigherSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
func TestVMwareGeneralPurposeBalancingStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
@@ -105,11 +196,11 @@ func TestVMwareGeneralPurposeBalancingStep_Run(t *testing.T) {
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "host-utilization"},
+ ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"},
Status: v1alpha1.KnowledgeStatus{Raw: hostUtilizations},
}).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "host-capabilities"},
+ ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"},
Status: v1alpha1.KnowledgeStatus{Raw: hostCapabilities},
}).
Build()
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go b/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go
index 15066c044..72612a05b 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_hana_binpacking_test.go
@@ -4,13 +4,16 @@
package weighers
import (
+ "context"
"log/slog"
+ "strings"
"testing"
api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -48,6 +51,94 @@ func TestVMwareHanaBinpackingStepOpts_Validate(t *testing.T) {
}
}
+func TestVMwareHanaBinpackingStep_Init(t *testing.T) {
+ scheme, err := v1alpha1.SchemeBuilder.Build()
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ validParams := runtime.RawExtension{
+ Raw: []byte(`{
+ "ramUtilizedAfterLowerBoundPct": 30.0,
+ "ramUtilizedAfterUpperBoundPct": 80.0,
+ "ramUtilizedAfterActivationLowerBound": 0.0,
+ "ramUtilizedAfterActivationUpperBound": 1.0
+ }`),
+ }
+
+ tests := []struct {
+ name string
+ knowledges []*v1alpha1.Knowledge
+ weigherSpec v1alpha1.WeigherSpec
+ wantError bool
+ errorContains string
+ }{
+ {
+ name: "successful init with valid knowledges",
+ knowledges: []*v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {Type: v1alpha1.KnowledgeConditionReady, Status: metav1.ConditionTrue},
+ },
+ RawLength: 10,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"},
+ Status: v1alpha1.KnowledgeStatus{
+ Conditions: []metav1.Condition{
+ {Type: v1alpha1.KnowledgeConditionReady, Status: metav1.ConditionTrue},
+ },
+ RawLength: 10,
+ },
+ },
+ },
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_hana_binpacking",
+ Params: validParams,
+ },
+ wantError: false,
+ },
+ {
+ name: "fails when host-utilization knowledge doesn't exist",
+ knowledges: nil,
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "vmware_hana_binpacking",
+ Params: validParams,
+ },
+ wantError: true,
+ errorContains: "failed to get knowledge",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := fake.NewClientBuilder().WithScheme(scheme)
+ for _, k := range tt.knowledges {
+ builder = builder.WithObjects(k)
+ }
+ client := builder.Build()
+
+ step := &VMwareHanaBinpackingStep{}
+ err := step.Init(context.Background(), client, tt.weigherSpec)
+
+ if tt.wantError {
+ if err == nil {
+ t.Error("expected error, got nil")
+ } else if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
func TestVMwareHanaBinpackingStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
@@ -102,11 +193,11 @@ func TestVMwareHanaBinpackingStep_Run(t *testing.T) {
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "host-utilization"},
+ ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"},
Status: v1alpha1.KnowledgeStatus{Raw: hostUtilizations},
}).
WithObjects(&v1alpha1.Knowledge{
- ObjectMeta: v1.ObjectMeta{Name: "host-capabilities"},
+ ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"},
Status: v1alpha1.KnowledgeStatus{Raw: hostCapabilities},
}).
Build()
From 2a81d7c68ba928689e86adb2e2c6195c72a4455e Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 11:32:31 +0100
Subject: [PATCH 39/41] Improve coverage in scheduling/lib
---
internal/scheduling/lib/detector_test.go | 202 +++++++++++++++
internal/scheduling/lib/filter_test.go | 66 +++++
.../scheduling/lib/filter_validation_test.go | 187 ++++++++++++++
.../filter_weigher_pipeline_step_opts_test.go | 11 +
.../lib/filter_weigher_pipeline_step_test.go | 185 +++++++++++++-
internal/scheduling/lib/weigher_test.go | 229 ++++++++++++++++++
.../scheduling/lib/weigher_validation_test.go | 72 ++++++
7 files changed, 951 insertions(+), 1 deletion(-)
diff --git a/internal/scheduling/lib/detector_test.go b/internal/scheduling/lib/detector_test.go
index 51ef00eaa..e80f07a7a 100644
--- a/internal/scheduling/lib/detector_test.go
+++ b/internal/scheduling/lib/detector_test.go
@@ -4,9 +4,12 @@
package lib
import (
+ "strings"
"testing"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -52,3 +55,202 @@ func TestDetector_Init(t *testing.T) {
t.Errorf("expected Option2 to be 2, got %d", step.Options.Option2)
}
}
+
+func TestDetector_Init_InvalidJSON(t *testing.T) {
+ step := BaseDetector[mockDetectorOptions]{}
+ cl := fake.NewClientBuilder().Build()
+ err := step.Init(t.Context(), cl, v1alpha1.DetectorSpec{
+ Params: runtime.RawExtension{Raw: []byte(`{invalid json}`)},
+ })
+ if err == nil {
+ t.Fatal("expected error for invalid JSON, got nil")
+ }
+}
+
+func TestBaseDetector_CheckKnowledges(t *testing.T) {
+ scheme := runtime.NewScheme()
+ if err := v1alpha1.AddToScheme(scheme); err != nil {
+ t.Fatalf("failed to add scheme: %v", err)
+ }
+
+ tests := []struct {
+ name string
+ knowledges []v1alpha1.Knowledge
+ refs []corev1.ObjectReference
+ expectError bool
+ errorMsg string
+ }{
+ {
+ name: "all knowledges ready",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: false,
+ },
+ {
+ name: "knowledge not found",
+ knowledges: []v1alpha1.Knowledge{},
+ refs: []corev1.ObjectReference{
+ {Name: "missing-knowledge", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "failed to get knowledge",
+ },
+ {
+ name: "knowledge not ready - condition false",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionFalse,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "not ready",
+ },
+ {
+ name: "knowledge not ready - no data",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 0,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "no data available",
+ },
+ {
+ name: "empty knowledge list",
+ knowledges: []v1alpha1.Knowledge{},
+ refs: []corev1.ObjectReference{},
+ expectError: false,
+ },
+ {
+ name: "multiple knowledges all ready",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge2",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 5,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ {Name: "knowledge2", Namespace: "default"},
+ },
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ clientBuilder := fake.NewClientBuilder().WithScheme(scheme)
+ for i := range tt.knowledges {
+ clientBuilder = clientBuilder.WithObjects(&tt.knowledges[i])
+ }
+ cl := clientBuilder.Build()
+
+ detector := &BaseDetector[mockDetectorOptions]{
+ Client: cl,
+ }
+
+ err := detector.CheckKnowledges(t.Context(), tt.refs...)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if tt.expectError && err != nil && tt.errorMsg != "" {
+ if !strings.Contains(err.Error(), tt.errorMsg) {
+ t.Errorf("expected error message to contain %q, got %q", tt.errorMsg, err.Error())
+ }
+ }
+ })
+ }
+}
+
+func TestBaseDetector_CheckKnowledges_NilClient(t *testing.T) {
+ detector := &BaseDetector[mockDetectorOptions]{
+ Client: nil,
+ }
+
+ err := detector.CheckKnowledges(t.Context(), corev1.ObjectReference{Name: "test", Namespace: "default"})
+
+ if err == nil {
+ t.Error("expected error for nil client but got nil")
+ }
+ if !strings.Contains(err.Error(), "client not initialized") {
+ t.Errorf("expected error message about client not initialized, got %q", err.Error())
+ }
+}
diff --git a/internal/scheduling/lib/filter_test.go b/internal/scheduling/lib/filter_test.go
index a321a74a6..ac7c4d1d7 100644
--- a/internal/scheduling/lib/filter_test.go
+++ b/internal/scheduling/lib/filter_test.go
@@ -6,9 +6,12 @@ package lib
import (
"context"
"log/slog"
+ "testing"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
type mockFilter[RequestType FilterWeigherPipelineRequest] struct {
@@ -28,3 +31,66 @@ func (m *mockFilter[RequestType]) Run(traceLog *slog.Logger, request RequestType
}
return m.RunFunc(traceLog, request)
}
+
+// filterTestOptions implements FilterWeigherPipelineStepOpts for testing.
+type filterTestOptions struct{}
+
+func (o filterTestOptions) Validate() error { return nil }
+
+func TestBaseFilter_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ filterSpec v1alpha1.FilterSpec
+ expectError bool
+ }{
+ {
+ name: "successful initialization with valid params",
+ filterSpec: v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "successful initialization with empty params",
+ filterSpec: v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "error on invalid JSON params",
+ filterSpec: v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{invalid json}`),
+ },
+ },
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ filter := &BaseFilter[mockFilterWeigherPipelineRequest, filterTestOptions]{}
+ cl := fake.NewClientBuilder().Build()
+
+ err := filter.Init(t.Context(), cl, tt.filterSpec)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if !tt.expectError && filter.Client == nil {
+ t.Error("expected client to be set but it was nil")
+ }
+ })
+ }
+}
diff --git a/internal/scheduling/lib/filter_validation_test.go b/internal/scheduling/lib/filter_validation_test.go
index f5e4e9efe..750ef5b62 100644
--- a/internal/scheduling/lib/filter_validation_test.go
+++ b/internal/scheduling/lib/filter_validation_test.go
@@ -2,3 +2,190 @@
// SPDX-License-Identifier: Apache-2.0
package lib
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestValidateFilter(t *testing.T) {
+ filter := &mockFilter[mockFilterWeigherPipelineRequest]{}
+ validator := validateFilter(filter)
+
+ if validator == nil {
+ t.Fatal("expected validator but got nil")
+ }
+ if validator.Filter != filter {
+ t.Error("expected filter to be set in validator")
+ }
+}
+
+func TestFilterValidator_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ filterSpec v1alpha1.FilterSpec
+ initError error
+ expectError bool
+ }{
+ {
+ name: "successful initialization",
+ filterSpec: v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ initError: nil,
+ expectError: false,
+ },
+ {
+ name: "initialization error",
+ filterSpec: v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ initError: errors.New("init error"),
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ filter := &mockFilter[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(_ context.Context, _ client.Client, _ v1alpha1.FilterSpec) error {
+ return tt.initError
+ },
+ }
+ validator := validateFilter(filter)
+ cl := fake.NewClientBuilder().Build()
+
+ err := validator.Init(t.Context(), cl, tt.filterSpec)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ })
+ }
+}
+
+func TestFilterValidator_Run(t *testing.T) {
+ tests := []struct {
+ name string
+ subjects []string
+ runResult *FilterWeigherPipelineStepResult
+ runError error
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "successful run - filter removes some subjects",
+ subjects: []string{"host1", "host2", "host3"},
+ runResult: &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ },
+ },
+ runError: nil,
+ expectError: false,
+ },
+ {
+ name: "successful run - filter keeps all subjects",
+ subjects: []string{"host1", "host2"},
+ runResult: &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ },
+ },
+ runError: nil,
+ expectError: false,
+ },
+ {
+ name: "run error from filter",
+ subjects: []string{"host1"},
+ runResult: nil,
+ runError: errors.New("filter error"),
+ expectError: true,
+ },
+ {
+ name: "validation error - subjects increased",
+ subjects: []string{"host1"},
+ runResult: &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ "host3": 1.0,
+ },
+ },
+ runError: nil,
+ expectError: true,
+ errorContains: "number of subjects increased",
+ },
+ {
+ name: "handle duplicate subjects in request",
+ subjects: []string{"host1", "host1", "host2"},
+ runResult: &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{
+ "host1": 1.0,
+ "host2": 1.0,
+ },
+ },
+ runError: nil,
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ filter := &mockFilter[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return tt.runResult, tt.runError
+ },
+ }
+ validator := validateFilter(filter)
+ request := mockFilterWeigherPipelineRequest{
+ Subjects: tt.subjects,
+ }
+ traceLog := slog.Default()
+
+ result, err := validator.Run(traceLog, request)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if tt.expectError && tt.errorContains != "" && err != nil {
+ if !containsStr(err.Error(), tt.errorContains) {
+ t.Errorf("expected error to contain %q, got %q", tt.errorContains, err.Error())
+ }
+ }
+ if !tt.expectError && result == nil {
+ t.Error("expected result but got nil")
+ }
+ })
+ }
+}
+
+func containsStr(s, substr string) bool {
+ for i := 0; i <= len(s)-len(substr); i++ {
+ if s[i:i+len(substr)] == substr {
+ return true
+ }
+ }
+ return false
+}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go b/internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go
index ad2b595ea..fe09706b6 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_opts_test.go
@@ -3,6 +3,10 @@
package lib
+import (
+ "testing"
+)
+
type MockOptions struct {
Option1 string `json:"option1"`
Option2 int `json:"option2"`
@@ -11,3 +15,10 @@ type MockOptions struct {
func (o MockOptions) Validate() error {
return nil
}
+
+func TestEmptyFilterWeigherPipelineStepOpts_Validate(t *testing.T) {
+ opts := EmptyFilterWeigherPipelineStepOpts{}
+ if err := opts.Validate(); err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_step_test.go b/internal/scheduling/lib/filter_weigher_pipeline_step_test.go
index 5c54d3881..826e8b6f1 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_step_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_step_test.go
@@ -3,4 +3,187 @@
package lib
-// TODO
+import (
+ "errors"
+ "testing"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// testStepOptions implements FilterWeigherPipelineStepOpts for testing.
+type testStepOptions struct {
+ ValidateError error
+}
+
+func (o testStepOptions) Validate() error {
+ return o.ValidateError
+}
+
+func TestBaseFilterWeigherPipelineStep_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ params runtime.RawExtension
+ expectError bool
+ }{
+ {
+ name: "successful initialization with valid params",
+ params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ expectError: false,
+ },
+ {
+ name: "successful initialization with empty params",
+ params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ expectError: false,
+ },
+ {
+ name: "error on invalid JSON params",
+ params: runtime.RawExtension{
+ Raw: []byte(`{invalid json}`),
+ },
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, testStepOptions]{}
+ cl := fake.NewClientBuilder().Build()
+
+ err := step.Init(t.Context(), cl, tt.params)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if !tt.expectError && step.Client == nil {
+ t.Error("expected client to be set but it was nil")
+ }
+ })
+ }
+}
+
+func TestBaseFilterWeigherPipelineStep_Init_ValidationError(t *testing.T) {
+ // We need a custom type with a Validate method that returns an error
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, failingValidationOptions]{}
+ cl := fake.NewClientBuilder().Build()
+
+ err := step.Init(t.Context(), cl, runtime.RawExtension{Raw: []byte(`{}`)})
+ if err == nil {
+ t.Error("expected error from validation but got nil")
+ }
+}
+
+type failingValidationOptions struct{}
+
+func (o failingValidationOptions) Validate() error {
+ return errors.New("validation failed")
+}
+
+func TestBaseFilterWeigherPipelineStep_IncludeAllHostsFromRequest(t *testing.T) {
+ tests := []struct {
+ name string
+ subjects []string
+ expectedCount int
+ }{
+ {
+ name: "multiple subjects",
+ subjects: []string{"host1", "host2", "host3"},
+ expectedCount: 3,
+ },
+ {
+ name: "single subject",
+ subjects: []string{"host1"},
+ expectedCount: 1,
+ },
+ {
+ name: "empty subjects",
+ subjects: []string{},
+ expectedCount: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, testStepOptions]{
+ ActivationFunction: ActivationFunction{},
+ }
+
+ request := mockFilterWeigherPipelineRequest{
+ Subjects: tt.subjects,
+ }
+
+ result := step.IncludeAllHostsFromRequest(request)
+
+ if result == nil {
+ t.Fatal("expected result but got nil")
+ }
+ if len(result.Activations) != tt.expectedCount {
+ t.Errorf("expected %d activations, got %d", tt.expectedCount, len(result.Activations))
+ }
+ for _, subject := range tt.subjects {
+ if _, ok := result.Activations[subject]; !ok {
+ t.Errorf("expected subject %s in activations", subject)
+ }
+ }
+ if result.Statistics == nil {
+ t.Error("expected statistics to be initialized")
+ }
+ })
+ }
+}
+
+func TestBaseFilterWeigherPipelineStep_PrepareStats(t *testing.T) {
+ tests := []struct {
+ name string
+ subjects []string
+ unit string
+ expectedUnit string
+ }{
+ {
+ name: "with subjects and unit",
+ subjects: []string{"host1", "host2", "host3"},
+ unit: "percentage",
+ expectedUnit: "percentage",
+ },
+ {
+ name: "empty subjects",
+ subjects: []string{},
+ unit: "count",
+ expectedUnit: "count",
+ },
+ {
+ name: "empty unit",
+ subjects: []string{"host1"},
+ unit: "",
+ expectedUnit: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, testStepOptions]{}
+
+ request := mockFilterWeigherPipelineRequest{
+ Subjects: tt.subjects,
+ }
+
+ stats := step.PrepareStats(request, tt.unit)
+
+ if stats.Unit != tt.expectedUnit {
+ t.Errorf("expected unit %s, got %s", tt.expectedUnit, stats.Unit)
+ }
+ if stats.Subjects == nil {
+ t.Error("expected subjects map to be initialized")
+ }
+			// Go maps expose no capacity accessor, so the only verifiable property
+			// here is that the map was initialized (the nil check above covers it).
+ })
+ }
+}
diff --git a/internal/scheduling/lib/weigher_test.go b/internal/scheduling/lib/weigher_test.go
index 2cbe52239..b1db3ec58 100644
--- a/internal/scheduling/lib/weigher_test.go
+++ b/internal/scheduling/lib/weigher_test.go
@@ -6,9 +6,14 @@ package lib
import (
"context"
"log/slog"
+ "testing"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
type mockWeigher[RequestType FilterWeigherPipelineRequest] struct {
@@ -28,3 +33,227 @@ func (m *mockWeigher[RequestType]) Run(traceLog *slog.Logger, request RequestTyp
}
return m.RunFunc(traceLog, request)
}
+
+// weigherTestOptions implements FilterWeigherPipelineStepOpts for testing.
+type weigherTestOptions struct{}
+
+func (o weigherTestOptions) Validate() error { return nil }
+
+func TestBaseWeigher_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ weigherSpec v1alpha1.WeigherSpec
+ expectError bool
+ }{
+ {
+ name: "successful initialization with valid params",
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "successful initialization with empty params",
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "error on invalid JSON params",
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{invalid json}`),
+ },
+ },
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ weigher := &BaseWeigher[mockFilterWeigherPipelineRequest, weigherTestOptions]{}
+ cl := fake.NewClientBuilder().Build()
+
+ err := weigher.Init(t.Context(), cl, tt.weigherSpec)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if !tt.expectError && weigher.Client == nil {
+ t.Error("expected client to be set but it was nil")
+ }
+ })
+ }
+}
+
+func TestBaseFilterWeigherPipelineStep_CheckKnowledges(t *testing.T) {
+ scheme := runtime.NewScheme()
+ if err := v1alpha1.AddToScheme(scheme); err != nil {
+ t.Fatalf("failed to add scheme: %v", err)
+ }
+
+ tests := []struct {
+ name string
+ knowledges []v1alpha1.Knowledge
+ refs []corev1.ObjectReference
+ expectError bool
+ errorMsg string
+ }{
+ {
+ name: "all knowledges ready",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: false,
+ },
+ {
+ name: "knowledge not found",
+ knowledges: []v1alpha1.Knowledge{},
+ refs: []corev1.ObjectReference{
+ {Name: "missing-knowledge", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "failed to get knowledge",
+ },
+ {
+ name: "knowledge not ready - condition false",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 10,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionFalse,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "not ready",
+ },
+ {
+ name: "knowledge not ready - no data",
+ knowledges: []v1alpha1.Knowledge{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "knowledge1",
+ Namespace: "default",
+ },
+ Status: v1alpha1.KnowledgeStatus{
+ RawLength: 0,
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.KnowledgeConditionReady,
+ Status: metav1.ConditionTrue,
+ },
+ },
+ },
+ },
+ },
+ refs: []corev1.ObjectReference{
+ {Name: "knowledge1", Namespace: "default"},
+ },
+ expectError: true,
+ errorMsg: "no data available",
+ },
+ {
+ name: "empty knowledge list",
+ knowledges: []v1alpha1.Knowledge{},
+ refs: []corev1.ObjectReference{},
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ clientBuilder := fake.NewClientBuilder().WithScheme(scheme)
+ for i := range tt.knowledges {
+ clientBuilder = clientBuilder.WithObjects(&tt.knowledges[i])
+ }
+ cl := clientBuilder.Build()
+
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, weigherTestOptions]{
+ Client: cl,
+ }
+
+ err := step.CheckKnowledges(t.Context(), tt.refs...)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ if tt.expectError && err != nil && tt.errorMsg != "" {
+ if !containsString(err.Error(), tt.errorMsg) {
+ t.Errorf("expected error message to contain %q, got %q", tt.errorMsg, err.Error())
+ }
+ }
+ })
+ }
+}
+
+func TestBaseFilterWeigherPipelineStep_CheckKnowledges_NilClient(t *testing.T) {
+ step := &BaseFilterWeigherPipelineStep[mockFilterWeigherPipelineRequest, weigherTestOptions]{
+ Client: nil,
+ }
+
+ err := step.CheckKnowledges(t.Context(), corev1.ObjectReference{Name: "test", Namespace: "default"})
+
+ if err == nil {
+ t.Error("expected error for nil client but got nil")
+ }
+ if !containsString(err.Error(), "client not initialized") {
+ t.Errorf("expected error message about client not initialized, got %q", err.Error())
+ }
+}
+
+func containsString(s, substr string) bool {
+ return len(s) >= len(substr) && (s == substr || s != "" && containsSubstring(s, substr))
+}
+
+func containsSubstring(s, substr string) bool {
+ for i := 0; i <= len(s)-len(substr); i++ {
+ if s[i:i+len(substr)] == substr {
+ return true
+ }
+ }
+ return false
+}
diff --git a/internal/scheduling/lib/weigher_validation_test.go b/internal/scheduling/lib/weigher_validation_test.go
index f7c31caad..697c3c1c9 100644
--- a/internal/scheduling/lib/weigher_validation_test.go
+++ b/internal/scheduling/lib/weigher_validation_test.go
@@ -4,11 +4,83 @@
package lib
import (
+ "context"
+ "errors"
"log/slog"
"reflect"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestValidateWeigher(t *testing.T) {
+ weigher := &mockWeigher[mockFilterWeigherPipelineRequest]{}
+ validator := validateWeigher(weigher)
+
+ if validator == nil {
+ t.Fatal("expected validator but got nil")
+ }
+ if validator.Weigher != weigher {
+ t.Error("expected weigher to be set in validator")
+ }
+}
+
+func TestWeigherValidator_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ weigherSpec v1alpha1.WeigherSpec
+ initError error
+ expectError bool
+ }{
+ {
+ name: "successful initialization",
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ initError: nil,
+ expectError: false,
+ },
+ {
+ name: "initialization error",
+ weigherSpec: v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ initError: errors.New("init error"),
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ weigher := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(_ context.Context, _ client.Client, _ v1alpha1.WeigherSpec) error {
+ return tt.initError
+ },
+ }
+ validator := validateWeigher(weigher)
+ cl := fake.NewClientBuilder().Build()
+
+ err := validator.Init(t.Context(), cl, tt.weigherSpec)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error but got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ })
+ }
+}
+
func TestWeigherValidator_Run_ValidHosts(t *testing.T) {
mockStep := &mockWeigher[mockFilterWeigherPipelineRequest]{
RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
From 9816fc1f3e09f2326c4eeb4fc08ee59fd644f8cd Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 12:13:34 +0100
Subject: [PATCH 40/41] More tests
---
.../cinder/external_scheduler_api_test.go | 49 ++
...filter_weigher_pipeline_controller_test.go | 10 +
.../scheduling/lib/detector_monitor_test.go | 62 ++
.../scheduling/lib/detector_pipeline_test.go | 370 ++++++++
.../scheduling/lib/filter_monitor_test.go | 118 +++
.../lib/filter_weigher_pipeline_test.go | 197 +++++
.../scheduling/lib/weigher_monitor_test.go | 118 +++
.../manila/external_scheduler_api_test.go | 49 ++
...filter_weigher_pipeline_controller_test.go | 10 +
.../netapp_cpu_usage_balancing_test.go | 77 ++
.../filter_has_enough_capacity_test.go | 799 +-----------------
.../filters/filter_node_affinity_test.go | 20 +
.../filters/filter_node_available_test.go | 20 +
.../filters/filter_node_capacity_test.go | 20 +
.../pods/plugins/filters/filter_noop_test.go | 20 +
.../pods/plugins/filters/filter_taint_test.go | 20 +
.../pods/plugins/weighers/binpack_test.go | 58 ++
17 files changed, 1238 insertions(+), 779 deletions(-)
create mode 100644 internal/scheduling/lib/filter_monitor_test.go
create mode 100644 internal/scheduling/lib/weigher_monitor_test.go
diff --git a/internal/scheduling/cinder/external_scheduler_api_test.go b/internal/scheduling/cinder/external_scheduler_api_test.go
index 5ad6d793e..480a5a6d5 100644
--- a/internal/scheduling/cinder/external_scheduler_api_test.go
+++ b/internal/scheduling/cinder/external_scheduler_api_test.go
@@ -308,6 +308,55 @@ func TestHTTPAPI_CinderExternalScheduler(t *testing.T) {
}
}
+func TestHTTPAPI_inferPipelineName(t *testing.T) {
+ config := conf.Config{SchedulingDomain: "test-operator"}
+ delegate := &mockHTTPAPIDelegate{}
+ api := NewAPI(config, delegate).(*httpAPI)
+
+ tests := []struct {
+ name string
+ request cinderapi.ExternalSchedulerRequest
+ expectedName string
+ expectError bool
+ }{
+ {
+ name: "returns default pipeline name",
+ request: cinderapi.ExternalSchedulerRequest{
+ Hosts: []cinderapi.ExternalSchedulerHost{
+ {VolumeHost: "host1"},
+ },
+ Weights: map[string]float64{
+ "host1": 1.0,
+ },
+ },
+ expectedName: "cinder-external-scheduler",
+ expectError: false,
+ },
+ {
+ name: "returns default pipeline name for empty request",
+ request: cinderapi.ExternalSchedulerRequest{},
+ expectedName: "cinder-external-scheduler",
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pipelineName, err := api.inferPipelineName(tt.request)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error, got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if pipelineName != tt.expectedName {
+ t.Errorf("expected pipeline name %s, got %s", tt.expectedName, pipelineName)
+ }
+ })
+ }
+}
+
func TestHTTPAPI_CinderExternalScheduler_DecisionCreation(t *testing.T) {
config := conf.Config{SchedulingDomain: v1alpha1.SchedulingDomainCinder}
diff --git a/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go b/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
index 444bdcebe..d5389ae38 100644
--- a/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
@@ -469,6 +469,16 @@ func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T)
}
}
+func TestFilterWeigherPipelineController_PipelineType(t *testing.T) {
+ controller := &FilterWeigherPipelineController{}
+
+ pipelineType := controller.PipelineType()
+
+ if pipelineType != v1alpha1.PipelineTypeFilterWeigher {
+ t.Errorf("expected pipeline type %s, got %s", v1alpha1.PipelineTypeFilterWeigher, pipelineType)
+ }
+}
+
func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
controller := &FilterWeigherPipelineController{
Monitor: lib.FilterWeigherPipelineMonitor{},
diff --git a/internal/scheduling/lib/detector_monitor_test.go b/internal/scheduling/lib/detector_monitor_test.go
index 0ebd40775..008d9411f 100644
--- a/internal/scheduling/lib/detector_monitor_test.go
+++ b/internal/scheduling/lib/detector_monitor_test.go
@@ -256,3 +256,65 @@ func TestMonitorStep_WithNilMonitor(t *testing.T) {
t.Error("expected Run to be called on wrapped step")
}
}
+
+func TestDetectorPipelineMonitor_SubPipeline(t *testing.T) {
+ tests := []struct {
+ name string
+ originalName string
+ newPipelineName string
+ expectedOriginal string
+ expectedNew string
+ }{
+ {
+ name: "creates copy with new name",
+ originalName: "original-pipeline",
+ newPipelineName: "new-pipeline",
+ expectedOriginal: "original-pipeline",
+ expectedNew: "new-pipeline",
+ },
+ {
+ name: "works with empty original name",
+ originalName: "",
+ newPipelineName: "new-pipeline",
+ expectedOriginal: "",
+ expectedNew: "new-pipeline",
+ },
+ {
+ name: "works with empty new name",
+ originalName: "original-pipeline",
+ newPipelineName: "",
+ expectedOriginal: "original-pipeline",
+ expectedNew: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ original := NewDetectorPipelineMonitor()
+ original.PipelineName = tt.originalName
+
+ copied := original.SubPipeline(tt.newPipelineName)
+
+ // Check that original is unchanged
+ if original.PipelineName != tt.expectedOriginal {
+ t.Errorf("original pipeline name changed, expected %s, got %s", tt.expectedOriginal, original.PipelineName)
+ }
+
+ // Check that copy has new name
+ if copied.PipelineName != tt.expectedNew {
+ t.Errorf("copied pipeline name incorrect, expected %s, got %s", tt.expectedNew, copied.PipelineName)
+ }
+
+ // Verify that the metrics are shared (same pointers)
+ if copied.stepRunTimer != original.stepRunTimer {
+ t.Error("expected stepRunTimer to be shared between original and copy")
+ }
+ if copied.stepDeschedulingCounter != original.stepDeschedulingCounter {
+ t.Error("expected stepDeschedulingCounter to be shared between original and copy")
+ }
+ if copied.pipelineRunTimer != original.pipelineRunTimer {
+ t.Error("expected pipelineRunTimer to be shared between original and copy")
+ }
+ })
+ }
+}
diff --git a/internal/scheduling/lib/detector_pipeline_test.go b/internal/scheduling/lib/detector_pipeline_test.go
index f5e4e9efe..79f39db53 100644
--- a/internal/scheduling/lib/detector_pipeline_test.go
+++ b/internal/scheduling/lib/detector_pipeline_test.go
@@ -2,3 +2,373 @@
// SPDX-License-Identifier: Apache-2.0
package lib
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// mockDetectorStep implements Detector[mockDetection]
+type mockDetectorStep struct {
+ decisions []mockDetection
+ initErr error
+ runErr error
+}
+
+func (m *mockDetectorStep) Init(ctx context.Context, client client.Client, step v1alpha1.DetectorSpec) error {
+ return m.initErr
+}
+
+func (m *mockDetectorStep) Run() ([]mockDetection, error) {
+ return m.decisions, m.runErr
+}
+
+func TestDetectorPipeline_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ confedSteps []v1alpha1.DetectorSpec
+ supportedSteps map[string]Detector[mockDetection]
+ expectNonCritical bool
+ expectCritical bool
+ expectedStepsCount int
+ }{
+ {
+ name: "successful init with one step",
+ confedSteps: []v1alpha1.DetectorSpec{
+ {Name: "step1", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ },
+ supportedSteps: map[string]Detector[mockDetection]{
+ "step1": &mockDetectorStep{},
+ },
+ expectNonCritical: false,
+ expectCritical: false,
+ expectedStepsCount: 1,
+ },
+ {
+ name: "successful init with multiple steps",
+ confedSteps: []v1alpha1.DetectorSpec{
+ {Name: "step1", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ {Name: "step2", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ },
+ supportedSteps: map[string]Detector[mockDetection]{
+ "step1": &mockDetectorStep{},
+ "step2": &mockDetectorStep{},
+ },
+ expectNonCritical: false,
+ expectCritical: false,
+ expectedStepsCount: 2,
+ },
+ {
+ name: "unsupported step returns non-critical error",
+ confedSteps: []v1alpha1.DetectorSpec{
+ {Name: "unsupported_step", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ },
+ supportedSteps: map[string]Detector[mockDetection]{},
+ expectNonCritical: true,
+ expectCritical: false,
+ expectedStepsCount: 0,
+ },
+ {
+ name: "step init error returns non-critical error",
+ confedSteps: []v1alpha1.DetectorSpec{
+ {Name: "failing_step", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ },
+ supportedSteps: map[string]Detector[mockDetection]{
+ "failing_step": &mockDetectorStep{initErr: errors.New("init failed")},
+ },
+ expectNonCritical: true,
+ expectCritical: false,
+ expectedStepsCount: 0,
+ },
+ {
+ name: "empty configuration",
+ confedSteps: []v1alpha1.DetectorSpec{},
+ supportedSteps: map[string]Detector[mockDetection]{},
+ expectNonCritical: false,
+ expectCritical: false,
+ expectedStepsCount: 0,
+ },
+ {
+ name: "mixed valid and invalid steps",
+ confedSteps: []v1alpha1.DetectorSpec{
+ {Name: "valid_step", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ {Name: "invalid_step", Params: runtime.RawExtension{Raw: []byte("{}")}},
+ },
+ supportedSteps: map[string]Detector[mockDetection]{
+ "valid_step": &mockDetectorStep{},
+ },
+ expectNonCritical: true,
+ expectCritical: false,
+ expectedStepsCount: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cl := fake.NewClientBuilder().Build()
+ pipeline := &DetectorPipeline[mockDetection]{
+ Client: cl,
+ Monitor: DetectorPipelineMonitor{},
+ }
+
+ nonCriticalErr, criticalErr := pipeline.Init(
+ context.Background(),
+ tt.confedSteps,
+ tt.supportedSteps,
+ )
+
+ if tt.expectNonCritical && nonCriticalErr == nil {
+ t.Error("expected non-critical error, got nil")
+ }
+ if !tt.expectNonCritical && nonCriticalErr != nil {
+ t.Errorf("expected no non-critical error, got %v", nonCriticalErr)
+ }
+ if tt.expectCritical && criticalErr == nil {
+ t.Error("expected critical error, got nil")
+ }
+ if !tt.expectCritical && criticalErr != nil {
+ t.Errorf("expected no critical error, got %v", criticalErr)
+ }
+ if len(pipeline.steps) != tt.expectedStepsCount {
+ t.Errorf("expected %d steps, got %d", tt.expectedStepsCount, len(pipeline.steps))
+ }
+ })
+ }
+}
+
+func TestDetectorPipeline_Run(t *testing.T) {
+ tests := []struct {
+ name string
+ steps map[string]Detector[mockDetection]
+ expectedCount int
+ expectedSteps []string
+ stepWithErrors bool
+ }{
+ {
+ name: "run single step successfully",
+ steps: map[string]Detector[mockDetection]{
+ "step1": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ },
+ expectedCount: 1,
+ expectedSteps: []string{"step1"},
+ },
+ {
+ name: "run multiple steps successfully",
+ steps: map[string]Detector[mockDetection]{
+ "step1": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ "step2": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm2", host: "host2", reason: "reason2"},
+ },
+ },
+ },
+ expectedCount: 2,
+ expectedSteps: []string{"step1", "step2"},
+ },
+ {
+ name: "step with error is skipped",
+ steps: map[string]Detector[mockDetection]{
+ "failing_step": &mockDetectorStep{
+ runErr: errors.New("run failed"),
+ },
+ "working_step": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ },
+ expectedCount: 1,
+ expectedSteps: []string{"working_step"},
+ stepWithErrors: true,
+ },
+ {
+ name: "step returning ErrStepSkipped is skipped",
+ steps: map[string]Detector[mockDetection]{
+ "skipped_step": &mockDetectorStep{
+ runErr: ErrStepSkipped,
+ },
+ "working_step": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ },
+ expectedCount: 1,
+ expectedSteps: []string{"working_step"},
+ },
+ {
+ name: "empty pipeline",
+ steps: map[string]Detector[mockDetection]{},
+ expectedCount: 0,
+ expectedSteps: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pipeline := &DetectorPipeline[mockDetection]{
+ steps: tt.steps,
+ Monitor: DetectorPipelineMonitor{},
+ }
+
+ result := pipeline.Run()
+
+ if len(result) != tt.expectedCount {
+ t.Errorf("expected %d step results, got %d", tt.expectedCount, len(result))
+ }
+
+ for _, stepName := range tt.expectedSteps {
+ if _, ok := result[stepName]; !ok {
+ t.Errorf("expected step %s in result", stepName)
+ }
+ }
+ })
+ }
+}
+
+func TestDetectorPipeline_Combine(t *testing.T) {
+ tests := []struct {
+ name string
+ decisionsByStep map[string][]mockDetection
+ expectedCount int
+ expectConflict bool
+ }{
+ {
+ name: "combine single decision",
+ decisionsByStep: map[string][]mockDetection{
+ "step1": {
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ expectedCount: 1,
+ },
+ {
+ name: "combine multiple decisions from different steps",
+ decisionsByStep: map[string][]mockDetection{
+ "step1": {
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ "step2": {
+ {resource: "vm2", host: "host2", reason: "reason2"},
+ },
+ },
+ expectedCount: 2,
+ },
+ {
+ name: "combine decisions for same resource with same host",
+ decisionsByStep: map[string][]mockDetection{
+ "step1": {
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ "step2": {
+ {resource: "vm1", host: "host1", reason: "reason2"},
+ },
+ },
+ expectedCount: 1,
+ },
+ {
+ name: "conflicting hosts for same resource",
+ decisionsByStep: map[string][]mockDetection{
+ "step1": {
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ "step2": {
+ {resource: "vm1", host: "host2", reason: "reason2"},
+ },
+ },
+ expectedCount: 0,
+ expectConflict: true,
+ },
+ {
+ name: "empty decisions",
+ decisionsByStep: map[string][]mockDetection{},
+ expectedCount: 0,
+ },
+ {
+ name: "step with empty decisions",
+ decisionsByStep: map[string][]mockDetection{
+ "step1": {},
+ },
+ expectedCount: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pipeline := &DetectorPipeline[mockDetection]{}
+
+ result := pipeline.Combine(tt.decisionsByStep)
+
+ if len(result) != tt.expectedCount {
+ t.Errorf("expected %d combined decisions, got %d", tt.expectedCount, len(result))
+ }
+ })
+ }
+}
+
+func TestDetectorPipeline_Combine_MergedReason(t *testing.T) {
+ pipeline := &DetectorPipeline[mockDetection]{}
+
+ decisionsByStep := map[string][]mockDetection{
+ "step1": {
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ "step2": {
+ {resource: "vm1", host: "host1", reason: "reason2"},
+ },
+ }
+
+ result := pipeline.Combine(decisionsByStep)
+
+ if len(result) != 1 {
+ t.Fatalf("expected 1 combined decision, got %d", len(result))
+ }
+
+ // The merged reason should contain both original reasons
+ reason := result[0].GetReason()
+ if reason == "" {
+ t.Error("expected non-empty reason")
+ }
+ if reason != "multiple reasons: reason1; reason2" && reason != "multiple reasons: reason2; reason1" {
+ // The order might vary due to map iteration order
+ if !strings.Contains(reason, "reason1") || !strings.Contains(reason, "reason2") {
+ t.Errorf("expected reason to contain both 'reason1' and 'reason2', got %s", reason)
+ }
+ }
+}
+
+func TestDetectorPipeline_RunWithMonitor(t *testing.T) {
+ // Test that Run works with a proper monitor
+ monitor := NewDetectorPipelineMonitor()
+ pipeline := &DetectorPipeline[mockDetection]{
+ steps: map[string]Detector[mockDetection]{
+ "step1": &mockDetectorStep{
+ decisions: []mockDetection{
+ {resource: "vm1", host: "host1", reason: "reason1"},
+ },
+ },
+ },
+ Monitor: monitor,
+ }
+
+ result := pipeline.Run()
+
+ if len(result) != 1 {
+ t.Errorf("expected 1 step result, got %d", len(result))
+ }
+}
diff --git a/internal/scheduling/lib/filter_monitor_test.go b/internal/scheduling/lib/filter_monitor_test.go
new file mode 100644
index 000000000..2e0e2e33f
--- /dev/null
+++ b/internal/scheduling/lib/filter_monitor_test.go
@@ -0,0 +1,118 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestMonitorFilter(t *testing.T) {
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ mockFilter := &mockFilter[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, cl client.Client, step v1alpha1.FilterSpec) error {
+ return nil
+ },
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{"host1": 0.5, "host2": 1.0},
+ }, nil
+ },
+ }
+
+ fm := monitorFilter(mockFilter, "test-filter", monitor)
+ if fm == nil {
+ t.Fatal("expected filter monitor, got nil")
+ }
+ if fm.filter == nil {
+ t.Error("expected filter to be set")
+ }
+ if fm.monitor == nil {
+ t.Error("expected monitor to be set")
+ }
+ if fm.monitor.stepName != "test-filter" {
+ t.Errorf("expected step name 'test-filter', got '%s'", fm.monitor.stepName)
+ }
+}
+
+func TestFilterMonitor_Init(t *testing.T) {
+ initCalled := false
+ mockFilter := &mockFilter[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, cl client.Client, step v1alpha1.FilterSpec) error {
+ initCalled = true
+ return nil
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+ fm := monitorFilter(mockFilter, "test-filter", monitor)
+
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := fm.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if !initCalled {
+ t.Error("expected Init to be called on wrapped filter")
+ }
+}
+
+func TestFilterMonitor_Run(t *testing.T) {
+ runCalled := false
+ mockFilter := &mockFilter[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ runCalled = true
+ return &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{"host1": 0.5, "host2": 1.0},
+ }, nil
+ },
+ }
+
+ runTimer := &mockObserver{}
+ removedSubjectsObserver := &mockObserver{}
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+ fm := monitorFilter(mockFilter, "test-filter", monitor)
+ // Manually set monitors for testing
+ fm.monitor.runTimer = runTimer
+ fm.monitor.removedSubjectsObserver = removedSubjectsObserver
+
+ request := mockFilterWeigherPipelineRequest{
+ Subjects: []string{"host1", "host2", "host3"},
+ Weights: map[string]float64{"host1": 0.1, "host2": 0.2, "host3": 0.3},
+ }
+
+ result, err := fm.Run(slog.Default(), request)
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if !runCalled {
+ t.Error("expected Run to be called on wrapped filter")
+ }
+ if result == nil {
+ t.Fatal("expected result, got nil")
+ }
+ if len(result.Activations) != 2 {
+ t.Errorf("expected 2 activations, got %d", len(result.Activations))
+ }
+}
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
index 25d9c2eaf..726be0fa9 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -4,9 +4,15 @@
package lib
import (
+ "context"
"log/slog"
"math"
"testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Mock pipeline type for testing
@@ -220,3 +226,194 @@ func TestPipeline_RunFilters(t *testing.T) {
t.Fatalf("expected 2 step results, got %d", len(req.Subjects))
}
}
+
+func TestInitNewFilterWeigherPipeline_Success(t *testing.T) {
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ supportedFilters := map[string]func() Filter[mockFilterWeigherPipelineRequest]{
+ "test-filter": func() Filter[mockFilterWeigherPipelineRequest] {
+ return &mockFilter[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, c client.Client, step v1alpha1.FilterSpec) error {
+ return nil
+ },
+ }
+ },
+ }
+
+ supportedWeighers := map[string]func() Weigher[mockFilterWeigherPipelineRequest]{
+ "test-weigher": func() Weigher[mockFilterWeigherPipelineRequest] {
+ return &mockWeigher[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, c client.Client, step v1alpha1.WeigherSpec) error {
+ return nil
+ },
+ }
+ },
+ }
+
+ confedFilters := []v1alpha1.FilterSpec{
+ {
+ Name: "test-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ }
+
+ confedWeighers := []v1alpha1.WeigherSpec{
+ {
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ result := InitNewFilterWeigherPipeline(
+ t.Context(),
+ cl,
+ "test-pipeline",
+ supportedFilters,
+ confedFilters,
+ supportedWeighers,
+ confedWeighers,
+ monitor,
+ )
+
+ if result.CriticalErr != nil {
+ t.Fatalf("expected no critical error, got %v", result.CriticalErr)
+ }
+ if result.Pipeline == nil {
+ t.Fatal("expected pipeline, got nil")
+ }
+}
+
+func TestInitNewFilterWeigherPipeline_UnsupportedFilter(t *testing.T) {
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ supportedFilters := map[string]func() Filter[mockFilterWeigherPipelineRequest]{}
+ supportedWeighers := map[string]func() Weigher[mockFilterWeigherPipelineRequest]{}
+
+ confedFilters := []v1alpha1.FilterSpec{
+ {
+ Name: "unsupported-filter",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ result := InitNewFilterWeigherPipeline(
+ t.Context(),
+ cl,
+ "test-pipeline",
+ supportedFilters,
+ confedFilters,
+ supportedWeighers,
+ nil,
+ monitor,
+ )
+
+ if result.CriticalErr == nil {
+ t.Fatal("expected critical error for unsupported filter, got nil")
+ }
+}
+
+func TestInitNewFilterWeigherPipeline_NameOverlap(t *testing.T) {
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ // Create filter and weigher with same name
+ supportedFilters := map[string]func() Filter[mockFilterWeigherPipelineRequest]{
+ "duplicate-name": func() Filter[mockFilterWeigherPipelineRequest] {
+ return &mockFilter[mockFilterWeigherPipelineRequest]{}
+ },
+ }
+ supportedWeighers := map[string]func() Weigher[mockFilterWeigherPipelineRequest]{
+ "duplicate-name": func() Weigher[mockFilterWeigherPipelineRequest] {
+ return &mockWeigher[mockFilterWeigherPipelineRequest]{}
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ result := InitNewFilterWeigherPipeline(
+ t.Context(),
+ cl,
+ "test-pipeline",
+ supportedFilters,
+ nil,
+ supportedWeighers,
+ nil,
+ monitor,
+ )
+
+ if result.CriticalErr == nil {
+ t.Fatal("expected critical error for name overlap, got nil")
+ }
+}
+
+func TestInitNewFilterWeigherPipeline_UnsupportedWeigher(t *testing.T) {
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ supportedFilters := map[string]func() Filter[mockFilterWeigherPipelineRequest]{}
+ supportedWeighers := map[string]func() Weigher[mockFilterWeigherPipelineRequest]{}
+
+ confedWeighers := []v1alpha1.WeigherSpec{
+ {
+ Name: "unsupported-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ result := InitNewFilterWeigherPipeline(
+ t.Context(),
+ cl,
+ "test-pipeline",
+ supportedFilters,
+ nil,
+ supportedWeighers,
+ confedWeighers,
+ monitor,
+ )
+
+ // Unsupported weigher should result in non-critical error
+ if result.NonCriticalErr == nil {
+ t.Fatal("expected non-critical error for unsupported weigher, got nil")
+ }
+ if result.CriticalErr != nil {
+ t.Fatalf("expected no critical error, got %v", result.CriticalErr)
+ }
+}
+
+func TestFilterWeigherPipelineMonitor_SubPipeline(t *testing.T) {
+ monitor := NewPipelineMonitor()
+
+ subPipeline := monitor.SubPipeline("test-sub-pipeline")
+
+ if subPipeline.PipelineName != "test-sub-pipeline" {
+ t.Errorf("expected pipeline name 'test-sub-pipeline', got '%s'", subPipeline.PipelineName)
+ }
+ // Verify that the original monitor is not modified
+ if monitor.PipelineName == "test-sub-pipeline" {
+ t.Error("original monitor should not be modified")
+ }
+}
diff --git a/internal/scheduling/lib/weigher_monitor_test.go b/internal/scheduling/lib/weigher_monitor_test.go
new file mode 100644
index 000000000..64e15d28f
--- /dev/null
+++ b/internal/scheduling/lib/weigher_monitor_test.go
@@ -0,0 +1,118 @@
+// Copyright SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package lib
+
+import (
+ "context"
+ "log/slog"
+ "testing"
+
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestMonitorWeigher(t *testing.T) {
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+
+ mockWeigher := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, cl client.Client, step v1alpha1.WeigherSpec) error {
+ return nil
+ },
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ return &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{"host1": 0.5, "host2": 1.0},
+ }, nil
+ },
+ }
+
+ wm := monitorWeigher(mockWeigher, "test-weigher", monitor)
+ if wm == nil {
+ t.Fatal("expected weigher monitor, got nil")
+ }
+ if wm.weigher == nil {
+ t.Error("expected weigher to be set")
+ }
+ if wm.monitor == nil {
+ t.Error("expected monitor to be set")
+ }
+ if wm.monitor.stepName != "test-weigher" {
+ t.Errorf("expected step name 'test-weigher', got '%s'", wm.monitor.stepName)
+ }
+}
+
+func TestWeigherMonitor_Init(t *testing.T) {
+ initCalled := false
+ mockWeigher := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ InitFunc: func(ctx context.Context, cl client.Client, step v1alpha1.WeigherSpec) error {
+ initCalled = true
+ return nil
+ },
+ }
+
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+ wm := monitorWeigher(mockWeigher, "test-weigher", monitor)
+
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := wm.Init(t.Context(), cl, v1alpha1.WeigherSpec{
+ Name: "test-weigher",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if !initCalled {
+ t.Error("expected Init to be called on wrapped weigher")
+ }
+}
+
+func TestWeigherMonitor_Run(t *testing.T) {
+ runCalled := false
+ mockWeigher := &mockWeigher[mockFilterWeigherPipelineRequest]{
+ RunFunc: func(traceLog *slog.Logger, request mockFilterWeigherPipelineRequest) (*FilterWeigherPipelineStepResult, error) {
+ runCalled = true
+ return &FilterWeigherPipelineStepResult{
+ Activations: map[string]float64{"host1": 0.5, "host2": 1.0},
+ }, nil
+ },
+ }
+
+ runTimer := &mockObserver{}
+ removedSubjectsObserver := &mockObserver{}
+ monitor := FilterWeigherPipelineMonitor{
+ PipelineName: "test-pipeline",
+ }
+ wm := monitorWeigher(mockWeigher, "test-weigher", monitor)
+ // Manually set monitors for testing
+ wm.monitor.runTimer = runTimer
+ wm.monitor.removedSubjectsObserver = removedSubjectsObserver
+
+ request := mockFilterWeigherPipelineRequest{
+ Subjects: []string{"host1", "host2", "host3"},
+ Weights: map[string]float64{"host1": 0.1, "host2": 0.2, "host3": 0.3},
+ }
+
+ result, err := wm.Run(slog.Default(), request)
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if !runCalled {
+ t.Error("expected Run to be called on wrapped weigher")
+ }
+ if result == nil {
+ t.Fatal("expected result, got nil")
+ }
+ if len(result.Activations) != 2 {
+ t.Errorf("expected 2 activations, got %d", len(result.Activations))
+ }
+}
diff --git a/internal/scheduling/manila/external_scheduler_api_test.go b/internal/scheduling/manila/external_scheduler_api_test.go
index 01f11c6b2..334a3f1fd 100644
--- a/internal/scheduling/manila/external_scheduler_api_test.go
+++ b/internal/scheduling/manila/external_scheduler_api_test.go
@@ -308,6 +308,55 @@ func TestHTTPAPI_ManilaExternalScheduler(t *testing.T) {
}
}
+func TestHTTPAPI_inferPipelineName(t *testing.T) {
+ config := conf.Config{SchedulingDomain: "test-operator"}
+ delegate := &mockHTTPAPIDelegate{}
+ api := NewAPI(config, delegate).(*httpAPI)
+
+ tests := []struct {
+ name string
+ request manilaapi.ExternalSchedulerRequest
+ expectedName string
+ expectError bool
+ }{
+ {
+ name: "returns default pipeline name",
+ request: manilaapi.ExternalSchedulerRequest{
+ Hosts: []manilaapi.ExternalSchedulerHost{
+ {ShareHost: "host1"},
+ },
+ Weights: map[string]float64{
+ "host1": 1.0,
+ },
+ },
+ expectedName: "manila-external-scheduler",
+ expectError: false,
+ },
+ {
+ name: "returns default pipeline name for empty request",
+ request: manilaapi.ExternalSchedulerRequest{},
+ expectedName: "manila-external-scheduler",
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pipelineName, err := api.inferPipelineName(tt.request)
+
+ if tt.expectError && err == nil {
+ t.Error("expected error, got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ if pipelineName != tt.expectedName {
+ t.Errorf("expected pipeline name %s, got %s", tt.expectedName, pipelineName)
+ }
+ })
+ }
+}
+
func TestHTTPAPI_ManilaExternalScheduler_DecisionCreation(t *testing.T) {
config := conf.Config{SchedulingDomain: v1alpha1.SchedulingDomainManila}
diff --git a/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
index e6c158c23..1739ae362 100644
--- a/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
@@ -466,6 +466,16 @@ func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T)
}
}
+func TestFilterWeigherPipelineController_PipelineType(t *testing.T) {
+ controller := &FilterWeigherPipelineController{}
+
+ pipelineType := controller.PipelineType()
+
+ if pipelineType != v1alpha1.PipelineTypeFilterWeigher {
+ t.Errorf("expected pipeline type %s, got %s", v1alpha1.PipelineTypeFilterWeigher, pipelineType)
+ }
+}
+
func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
scheme := runtime.NewScheme()
if err := v1alpha1.AddToScheme(scheme); err != nil {
diff --git a/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go b/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
index f78559b28..839d1d2a1 100644
--- a/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
+++ b/internal/scheduling/manila/plugins/weighers/netapp_cpu_usage_balancing_test.go
@@ -14,6 +14,83 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestNetappCPUUsageBalancingStepOpts_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ opts NetappCPUUsageBalancingStepOpts
+ expectError bool
+ }{
+ {
+ name: "valid options with different bounds",
+ opts: NetappCPUUsageBalancingStepOpts{
+ AvgCPUUsageLowerBound: 0.0,
+ AvgCPUUsageUpperBound: 100.0,
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -1.0,
+ MaxCPUUsageLowerBound: 0.0,
+ MaxCPUUsageUpperBound: 100.0,
+ MaxCPUUsageActivationLowerBound: 0.0,
+ MaxCPUUsageActivationUpperBound: -1.0,
+ },
+ expectError: false,
+ },
+ {
+ name: "invalid - avg bounds equal",
+ opts: NetappCPUUsageBalancingStepOpts{
+ AvgCPUUsageLowerBound: 50.0,
+ AvgCPUUsageUpperBound: 50.0, // Same as lower
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -1.0,
+ MaxCPUUsageLowerBound: 0.0,
+ MaxCPUUsageUpperBound: 100.0,
+ MaxCPUUsageActivationLowerBound: 0.0,
+ MaxCPUUsageActivationUpperBound: -1.0,
+ },
+ expectError: true,
+ },
+ {
+ name: "invalid - max bounds equal",
+ opts: NetappCPUUsageBalancingStepOpts{
+ AvgCPUUsageLowerBound: 0.0,
+ AvgCPUUsageUpperBound: 100.0,
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -1.0,
+ MaxCPUUsageLowerBound: 75.0,
+ MaxCPUUsageUpperBound: 75.0, // Same as lower
+ MaxCPUUsageActivationLowerBound: 0.0,
+ MaxCPUUsageActivationUpperBound: -1.0,
+ },
+ expectError: true,
+ },
+ {
+ name: "invalid - both bounds equal",
+ opts: NetappCPUUsageBalancingStepOpts{
+ AvgCPUUsageLowerBound: 0.0,
+ AvgCPUUsageUpperBound: 0.0, // Same as lower
+ AvgCPUUsageActivationLowerBound: 0.0,
+ AvgCPUUsageActivationUpperBound: -1.0,
+ MaxCPUUsageLowerBound: 0.0,
+ MaxCPUUsageUpperBound: 0.0, // Same as lower
+ MaxCPUUsageActivationLowerBound: 0.0,
+ MaxCPUUsageActivationUpperBound: -1.0,
+ },
+ expectError: true, // First error is for avg bounds
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.opts.Validate()
+ if tt.expectError && err == nil {
+ t.Error("expected error, got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ })
+ }
+}
+
func TestNetappCPUUsageBalancingStep_Run(t *testing.T) {
scheme, err := v1alpha1.SchemeBuilder.Build()
if err != nil {
diff --git a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
index e1bdaaa84..a6e6561e9 100644
--- a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
+++ b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
@@ -4,803 +4,44 @@
package filters
import (
- "log/slog"
"testing"
-
- api "github.com/cobaltcore-dev/cortex/api/delegation/nova"
- "github.com/cobaltcore-dev/cortex/api/v1alpha1"
- hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-func TestFilterHasEnoughCapacity_Run(t *testing.T) {
- scheme, err := hv1.SchemeBuilder.Build()
- if err != nil {
- t.Fatalf("expected no error building hypervisor scheme, got %v", err)
- }
- if err := v1alpha1.AddToScheme(scheme); err != nil {
- t.Fatalf("expected no error adding v1alpha1 to scheme, got %v", err)
- }
-
- // Define hypervisors with various capacity configurations
- hvs := []client.Object{
- &hv1.Hypervisor{
- ObjectMeta: v1.ObjectMeta{
- Name: "host1",
- },
- Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
- "cpu": resource.MustParse("32"), // 32 vCPUs
- "memory": resource.MustParse("64Gi"), // 64 GiB = 68719476736 bytes
- },
- Allocation: map[string]resource.Quantity{
- "cpu": resource.MustParse("8"), // 8 vCPUs used
- "memory": resource.MustParse("16Gi"), // 16 GiB used
- },
- },
- },
- &hv1.Hypervisor{
- ObjectMeta: v1.ObjectMeta{
- Name: "host2",
- },
- Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
- "cpu": resource.MustParse("16"),
- "memory": resource.MustParse("32Gi"),
- },
- Allocation: map[string]resource.Quantity{
- "cpu": resource.MustParse("14"),
- "memory": resource.MustParse("28Gi"),
- },
- },
- },
- &hv1.Hypervisor{
- ObjectMeta: v1.ObjectMeta{
- Name: "host3",
- },
- Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
- "cpu": resource.MustParse("64"),
- "memory": resource.MustParse("128Gi"),
- },
- Allocation: map[string]resource.Quantity{
- "cpu": resource.MustParse("0"),
- "memory": resource.MustParse("0"),
- },
- },
- },
- &hv1.Hypervisor{
- ObjectMeta: v1.ObjectMeta{
- Name: "host4",
- },
- Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
- "cpu": resource.MustParse("8"),
- "memory": resource.MustParse("16Gi"),
- },
- Allocation: map[string]resource.Quantity{
- "cpu": resource.MustParse("4"),
- "memory": resource.MustParse("12Gi"),
- },
- },
- },
- &hv1.Hypervisor{
- ObjectMeta: v1.ObjectMeta{
- Name: "host5",
- },
- Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
- "cpu": resource.MustParse("48"),
- "memory": resource.MustParse("96Gi"),
- },
- Allocation: map[string]resource.Quantity{
- "cpu": resource.MustParse("40"),
- "memory": resource.MustParse("80Gi"),
- },
- },
- },
- }
-
+func TestFilterHasEnoughCapacityOpts_Validate(t *testing.T) {
tests := []struct {
- name string
- request api.ExternalSchedulerRequest
- reservations []client.Object
- opts FilterHasEnoughCapacityOpts
- expectedHosts []string
- filteredHosts []string
- expectError bool
+ name string
+ opts FilterHasEnoughCapacityOpts
+ expectError bool
}{
{
- name: "Single instance with sufficient capacity on all hosts",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048, // 2 GB
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host2"},
- {ComputeHost: "host3"},
- {ComputeHost: "host4"},
- {ComputeHost: "host5"},
- },
- },
- expectedHosts: []string{"host1", "host2", "host3", "host4", "host5"},
- filteredHosts: []string{},
- },
- {
- name: "Single instance - filter host with insufficient CPU",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.large",
- VCPUs: 4,
- MemoryMB: 4096,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host2"},
- {ComputeHost: "host4"},
- },
- },
- expectedHosts: []string{"host1", "host4"},
- filteredHosts: []string{"host2"},
- },
- {
- name: "Single instance - filter host with insufficient memory",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.xlarge",
- VCPUs: 2,
- MemoryMB: 20480, // 20 GB
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host2"},
- {ComputeHost: "host5"},
- },
- },
- expectedHosts: []string{"host1"},
- filteredHosts: []string{"host2", "host5"},
- },
- {
- name: "Multiple instances - require capacity for all on same host",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 3,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.medium",
- VCPUs: 4,
- MemoryMB: 8192, // 8 GB
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host3"},
- {ComputeHost: "host5"},
- },
- },
- expectedHosts: []string{"host1", "host3"},
- filteredHosts: []string{"host5"},
- },
- {
- name: "No hosts have sufficient capacity",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.huge",
- VCPUs: 32,
- MemoryMB: 65536, // 64 GB
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host2"},
- {ComputeHost: "host4"},
- },
+ name: "valid options with lock reserved true",
+ opts: FilterHasEnoughCapacityOpts{
+ LockReserved: true,
},
- expectedHosts: []string{},
- filteredHosts: []string{"host1", "host2", "host4"},
+ expectError: false,
},
{
- name: "Active reservation locks resources - filter host",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"},
- },
+ name: "valid options with lock reserved false",
+ opts: FilterHasEnoughCapacityOpts{
+ LockReserved: false,
},
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-1",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-2",
- FlavorName: "m1.medium",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("2Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host4",
- },
- },
- },
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
- },
- {
- name: "Matching reservation unlocks resources - host passes",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-matching",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-1",
- FlavorName: "m1.small",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("2Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host4",
- },
- },
- },
- opts: FilterHasEnoughCapacityOpts{LockReserved: false},
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
+ expectError: false,
},
{
- name: "LockReserved option - matching reservation still locks resources",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-locked",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-1",
- FlavorName: "m1.small",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("2Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host4",
- },
- },
- },
- opts: FilterHasEnoughCapacityOpts{LockReserved: true},
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
- },
- {
- name: "Inactive reservation does not affect capacity",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-inactive",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-2",
- FlavorName: "m1.medium",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("2Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseFailed,
- Host: "host4",
- },
- },
- },
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
- },
- {
- name: "Reservation without CortexNova scheduler is ignored",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-other-scheduler",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: nil,
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("2Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host4",
- },
- },
- },
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
- },
- {
- name: "Multiple reservations on different hosts",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-1",
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host2"},
- {ComputeHost: "host4"},
- {ComputeHost: "host5"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-host2",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-2",
- FlavorName: "m1.medium",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("2"),
- "memory": resource.MustParse("4Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host2",
- },
- },
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-host5",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-3",
- FlavorName: "m1.large",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("4"),
- "memory": resource.MustParse("8Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host5",
- },
- },
- },
- expectedHosts: []string{"host4", "host5"},
- filteredHosts: []string{"host2"},
- },
- {
- name: "Empty host list",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{},
- },
- expectedHosts: []string{},
- filteredHosts: []string{},
- },
- {
- name: "Host not in database is filtered out",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.small",
- VCPUs: 2,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host-unknown"},
- },
- },
- expectedHosts: []string{"host1"},
- filteredHosts: []string{"host-unknown"},
- },
- {
- name: "Large number of instances - edge case",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 10,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.tiny",
- VCPUs: 1,
- MemoryMB: 512,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host3"},
- },
- },
- expectedHosts: []string{"host1", "host3"},
- filteredHosts: []string{},
- },
- {
- name: "Flavor with zero VCPUs - error case",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "invalid-flavor",
- VCPUs: 0,
- MemoryMB: 2048,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- },
- },
- expectError: true,
- },
- {
- name: "Flavor with zero memory - error case",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "invalid-flavor",
- VCPUs: 2,
- MemoryMB: 0,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- },
- },
- expectError: true,
- },
- {
- name: "Memory boundary - exactly enough memory",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.exact",
- VCPUs: 2,
- MemoryMB: 4096, // Exactly 4 GB
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host4"}, // Has 4 GB free (16-12)
- },
- },
- expectedHosts: []string{"host4"},
- filteredHosts: []string{},
- },
- {
- name: "CPU boundary - exactly enough CPU",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- NumInstances: 1,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.exact-cpu",
- VCPUs: 2, // Exactly 2 vCPUs
- MemoryMB: 1024,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host2"}, // Has 2 vCPUs free (16-14)
- },
- },
- expectedHosts: []string{"host2"},
- filteredHosts: []string{},
- },
- {
- name: "Complex scenario with multiple hosts and reservations",
- request: api.ExternalSchedulerRequest{
- Spec: api.NovaObject[api.NovaSpec]{
- Data: api.NovaSpec{
- ProjectID: "project-test",
- NumInstances: 2,
- Flavor: api.NovaObject[api.NovaFlavor]{
- Data: api.NovaFlavor{
- Name: "m1.test",
- VCPUs: 4,
- MemoryMB: 8192,
- },
- },
- },
- },
- Hosts: []api.ExternalSchedulerHost{
- {ComputeHost: "host1"},
- {ComputeHost: "host3"},
- {ComputeHost: "host5"},
- },
- },
- reservations: []client.Object{
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-host1-matching",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-test",
- FlavorName: "m1.test",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("8"),
- "memory": resource.MustParse("16Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host1",
- },
- },
- &v1alpha1.Reservation{
- ObjectMeta: v1.ObjectMeta{
- Name: "reservation-host5-nonmatching",
- },
- Spec: v1alpha1.ReservationSpec{
- Scheduler: v1alpha1.ReservationSchedulerSpec{
- CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{
- ProjectID: "project-other",
- FlavorName: "m1.other",
- },
- },
- Requests: map[string]resource.Quantity{
- "cpu": resource.MustParse("4"),
- "memory": resource.MustParse("8Gi"),
- },
- },
- Status: v1alpha1.ReservationStatus{
- Phase: v1alpha1.ReservationStatusPhaseActive,
- Host: "host5",
- },
- },
- },
- opts: FilterHasEnoughCapacityOpts{LockReserved: false},
- expectedHosts: []string{"host1", "host3"},
- filteredHosts: []string{"host5"},
+ name: "valid options with default values",
+ opts: FilterHasEnoughCapacityOpts{},
+ expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- // Build the fake client with hypervisors and reservations
- objects := make([]client.Object, 0, len(hvs)+len(tt.reservations))
- objects = append(objects, hvs...)
- objects = append(objects, tt.reservations...)
-
- step := &FilterHasEnoughCapacity{}
- step.Options = tt.opts
- step.Client = fake.NewClientBuilder().
- WithScheme(scheme).
- WithObjects(objects...).
- Build()
-
- result, err := step.Run(slog.Default(), tt.request)
-
- if tt.expectError {
- if err == nil {
- t.Fatalf("expected error, got nil")
- }
- return
- }
-
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
+ err := tt.opts.Validate()
+ if tt.expectError && err == nil {
+ t.Error("expected error, got nil")
}
-
- // Check expected hosts are present
- for _, host := range tt.expectedHosts {
- if _, ok := result.Activations[host]; !ok {
- t.Errorf("expected host %s to be present in activations", host)
- }
- }
-
- // Check filtered hosts are not present
- for _, host := range tt.filteredHosts {
- if _, ok := result.Activations[host]; ok {
- t.Errorf("expected host %s to be filtered out", host)
- }
- }
-
- // Check total count
- if len(result.Activations) != len(tt.expectedHosts) {
- t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations))
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error, got %v", err)
}
})
}
diff --git a/internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go b/internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go
index f8f08da3a..93070523b 100644
--- a/internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go
+++ b/internal/scheduling/pods/plugins/filters/filter_node_affinity_test.go
@@ -8,10 +8,30 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestNodeAffinityFilter_Init(t *testing.T) {
+ filter := &NodeAffinityFilter{}
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := filter.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "node-affinity",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
+
func TestNodeAffinityFilter_Run(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/scheduling/pods/plugins/filters/filter_node_available_test.go b/internal/scheduling/pods/plugins/filters/filter_node_available_test.go
index 3649da5de..0dba64bf0 100644
--- a/internal/scheduling/pods/plugins/filters/filter_node_available_test.go
+++ b/internal/scheduling/pods/plugins/filters/filter_node_available_test.go
@@ -8,10 +8,30 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestNodeAvailableFilter_Init(t *testing.T) {
+ filter := &NodeAvailableFilter{}
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := filter.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "node-available",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
+
func TestNodeAvailableFilter_Run(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go b/internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go
index 790459deb..04f2d2085 100644
--- a/internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go
+++ b/internal/scheduling/pods/plugins/filters/filter_node_capacity_test.go
@@ -8,11 +8,31 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestNodeCapacityFilter_Init(t *testing.T) {
+ filter := &NodeCapacityFilter{}
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := filter.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "node-capacity",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
+
func TestNodeCapacityFilter_Run(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/scheduling/pods/plugins/filters/filter_noop_test.go b/internal/scheduling/pods/plugins/filters/filter_noop_test.go
index e42ae9f23..de3396b56 100644
--- a/internal/scheduling/pods/plugins/filters/filter_noop_test.go
+++ b/internal/scheduling/pods/plugins/filters/filter_noop_test.go
@@ -8,10 +8,30 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestNoopFilter_Init(t *testing.T) {
+ filter := &NoopFilter{}
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := filter.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "noop",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
+
func TestNoopFilter_Run(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/scheduling/pods/plugins/filters/filter_taint_test.go b/internal/scheduling/pods/plugins/filters/filter_taint_test.go
index 605b96114..97d5b2323 100644
--- a/internal/scheduling/pods/plugins/filters/filter_taint_test.go
+++ b/internal/scheduling/pods/plugins/filters/filter_taint_test.go
@@ -8,10 +8,30 @@ import (
"testing"
"github.com/cobaltcore-dev/cortex/api/delegation/pods"
+ "github.com/cobaltcore-dev/cortex/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
+func TestTaintFilter_Init(t *testing.T) {
+ filter := &TaintFilter{}
+ scheme := runtime.NewScheme()
+ cl := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ err := filter.Init(t.Context(), cl, v1alpha1.FilterSpec{
+ Name: "taint",
+ Params: runtime.RawExtension{
+ Raw: []byte(`{}`),
+ },
+ })
+
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+}
+
func TestTaintFilter_Run(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/scheduling/pods/plugins/weighers/binpack_test.go b/internal/scheduling/pods/plugins/weighers/binpack_test.go
index 7f82be8c7..7d7eea9b5 100644
--- a/internal/scheduling/pods/plugins/weighers/binpack_test.go
+++ b/internal/scheduling/pods/plugins/weighers/binpack_test.go
@@ -14,6 +14,64 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+func TestBinpackingStepOpts_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ opts BinpackingStepOpts
+ expectError bool
+ }{
+ {
+ name: "valid options with positive weights",
+ opts: BinpackingStepOpts{
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceCPU: 2.0,
+ corev1.ResourceMemory: 1.0,
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "valid options with zero weights",
+ opts: BinpackingStepOpts{
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceCPU: 0.0,
+ corev1.ResourceMemory: 0.0,
+ },
+ },
+ expectError: false,
+ },
+ {
+ name: "valid options with empty weights",
+ opts: BinpackingStepOpts{
+ ResourceWeights: map[corev1.ResourceName]float64{},
+ },
+ expectError: false,
+ },
+ {
+ name: "invalid options with negative weight",
+ opts: BinpackingStepOpts{
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceCPU: -1.0,
+ corev1.ResourceMemory: 1.0,
+ },
+ },
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.opts.Validate()
+ if tt.expectError && err == nil {
+ t.Error("expected error, got nil")
+ }
+ if !tt.expectError && err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ })
+ }
+}
+
func TestBinpackingStep_Run(t *testing.T) {
tests := []struct {
name string
From 4c534e26d99ff869c00806461391c48b28c4f546 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Tue, 27 Jan 2026 14:40:15 +0100
Subject: [PATCH 41/41] Add substatus for filters, weighers, and detectors
---
api/v1alpha1/pipeline_types.go | 57 ++++-
api/v1alpha1/zz_generated.deepcopy.go | 87 +++++++
config/crd/bases/cortex.cloud_pipelines.yaml | 215 +++++++++++++++++-
config/crd/cortex.cloud_pipelines.yaml | 215 +++++++++++++++++-
.../templates/crd/cortex.cloud_pipelines.yaml | 215 +++++++++++++++++-
.../cortex-nova/templates/pipelines_kvm.yaml | 2 +-
...filter_weigher_pipeline_controller_test.go | 16 +-
internal/scheduling/lib/detector_pipeline.go | 13 +-
.../scheduling/lib/detector_pipeline_test.go | 23 +-
.../scheduling/lib/filter_weigher_pipeline.go | 39 ++--
.../lib/filter_weigher_pipeline_test.go | 54 +----
.../scheduling/lib/pipeline_controller.go | 80 ++++++-
.../lib/pipeline_controller_test.go | 9 +-
.../scheduling/lib/pipeline_initializer.go | 15 +-
...filter_weigher_pipeline_controller_test.go | 12 +-
...filter_weigher_pipeline_controller_test.go | 18 +-
.../nova/detector_pipeline_controller.go | 7 +-
.../nova/detector_pipeline_controller_test.go | 22 +-
...filter_weigher_pipeline_controller_test.go | 20 +-
...filter_weigher_pipeline_controller_test.go | 14 +-
20 files changed, 943 insertions(+), 190 deletions(-)
diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go
index ef14c9072..cb2dfbf71 100644
--- a/api/v1alpha1/pipeline_types.go
+++ b/api/v1alpha1/pipeline_types.go
@@ -65,9 +65,9 @@ const (
// Pipeline containing filter-weigher steps for initial placement,
// migration, etc. of instances.
PipelineTypeFilterWeigher PipelineType = "filter-weigher"
- // Pipeline containing descheduler steps for generating descheduling
+ // Pipeline containing detector steps, e.g. for generating descheduling
// recommendations.
- PipelineTypeDescheduler PipelineType = "descheduler"
+ PipelineTypeDetector PipelineType = "detector"
)
type PipelineSpec struct {
@@ -85,14 +85,14 @@ type PipelineSpec struct {
CreateDecisions bool `json:"createDecisions,omitempty"`
// The type of the pipeline, used to differentiate between
- // filter-weigher and descheduler pipelines within the same
+ // filter-weigher and detector pipelines within the same
// scheduling domain.
//
// If the type is filter-weigher, the filter and weigher attributes
- // must be set. If the type is descheduler, the detectors attribute
+ // must be set. If the type is detector, the detectors attribute
// must be set.
//
- // +kubebuilder:validation:Enum=filter-weigher;descheduler
+ // +kubebuilder:validation:Enum=filter-weigher;detector
Type PipelineType `json:"type"`
// Ordered list of filters to apply in a scheduling pipeline.
@@ -112,13 +112,46 @@ type PipelineSpec struct {
// Ordered list of detectors to apply in a descheduling pipeline.
//
- // This attribute is set only if the pipeline type is descheduler.
+ // This attribute is set only if the pipeline type is detector.
// Detectors find candidates for descheduling (migration off current host).
// These detectors are run after weighers are applied.
// +kubebuilder:validation:Optional
Detectors []DetectorSpec `json:"detectors,omitempty"`
}
+const (
+ FilterConditionReady = "Ready"
+ WeigherConditionReady = "Ready"
+ DetectorConditionReady = "Ready"
+)
+
+type FilterStatus struct {
+ // The name of the filter.
+ Name string `json:"name"`
+
+ // The current status conditions of the filter.
+ // +kubebuilder:validation:Optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+type WeigherStatus struct {
+ // The name of the weigher.
+ Name string `json:"name"`
+
+ // The current status conditions of the weigher.
+ // +kubebuilder:validation:Optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+type DetectorStatus struct {
+ // The name of the detector.
+ Name string `json:"name"`
+
+ // The current status conditions of the detector.
+ // +kubebuilder:validation:Optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
const (
// The pipeline is ready to be used.
PipelineConditionReady = "Ready"
@@ -127,6 +160,18 @@ const (
)
type PipelineStatus struct {
+ // List of statuses for each filter in the pipeline.
+ // +kubebuilder:validation:Optional
+ Filters []FilterStatus `json:"filters,omitempty"`
+
+ // List of statuses for each weigher in the pipeline.
+ // +kubebuilder:validation:Optional
+ Weighers []WeigherStatus `json:"weighers,omitempty"`
+
+ // List of statuses for each detector in the pipeline.
+ // +kubebuilder:validation:Optional
+ Detectors []DetectorStatus `json:"detectors,omitempty"`
+
// The current status conditions of the pipeline.
// +kubebuilder:validation:Optional
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index cdbae7dd4..2551ef3ea 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -441,6 +441,28 @@ func (in *DetectorSpec) DeepCopy() *DetectorSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DetectorStatus) DeepCopyInto(out *DetectorStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorStatus.
+func (in *DetectorStatus) DeepCopy() *DetectorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DetectorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilterSpec) DeepCopyInto(out *FilterSpec) {
*out = *in
@@ -457,6 +479,28 @@ func (in *FilterSpec) DeepCopy() *FilterSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterStatus) DeepCopyInto(out *FilterStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterStatus.
+func (in *FilterStatus) DeepCopy() *FilterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(FilterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityDatasource) DeepCopyInto(out *IdentityDatasource) {
*out = *in
@@ -908,6 +952,27 @@ func (in *PipelineSpec) DeepCopy() *PipelineSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
*out = *in
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]FilterStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Weighers != nil {
+ in, out := &in.Weighers, &out.Weighers
+ *out = make([]WeigherStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Detectors != nil {
+ in, out := &in.Detectors, &out.Detectors
+ *out = make([]DetectorStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
@@ -1149,3 +1214,25 @@ func (in *WeigherSpec) DeepCopy() *WeigherSpec {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WeigherStatus) DeepCopyInto(out *WeigherStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeigherStatus.
+func (in *WeigherStatus) DeepCopy() *WeigherStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(WeigherStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/config/crd/bases/cortex.cloud_pipelines.yaml b/config/crd/bases/cortex.cloud_pipelines.yaml
index 36402b022..c86d2864d 100644
--- a/config/crd/bases/cortex.cloud_pipelines.yaml
+++ b/config/crd/bases/cortex.cloud_pipelines.yaml
@@ -69,7 +69,7 @@ spec:
description: |-
Ordered list of detectors to apply in a descheduling pipeline.
- This attribute is set only if the pipeline type is descheduler.
+ This attribute is set only if the pipeline type is detector.
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
@@ -129,15 +129,15 @@ spec:
type:
description: |-
The type of the pipeline, used to differentiate between
- filter-weigher and descheduler pipelines within the same
+ filter-weigher and detector pipelines within the same
scheduling domain.
If the type is filter-weigher, the filter and weigher attributes
- must be set. If the type is descheduler, the detectors attribute
+ must be set. If the type is detector, the detectors attribute
must be set.
enum:
- filter-weigher
- - descheduler
+ - detector
type: string
weighers:
description: |-
@@ -236,6 +236,213 @@ spec:
- type
type: object
type: array
+ detectors:
+ description: List of statuses for each detector in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the detector.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the detector.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: List of statuses for each filter in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the filter.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the filter.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ weighers:
+ description: List of statuses for each weigher in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the weigher.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the weigher.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
type: object
required:
- spec
diff --git a/config/crd/cortex.cloud_pipelines.yaml b/config/crd/cortex.cloud_pipelines.yaml
index 36402b022..c86d2864d 100644
--- a/config/crd/cortex.cloud_pipelines.yaml
+++ b/config/crd/cortex.cloud_pipelines.yaml
@@ -69,7 +69,7 @@ spec:
description: |-
Ordered list of detectors to apply in a descheduling pipeline.
- This attribute is set only if the pipeline type is descheduler.
+ This attribute is set only if the pipeline type is detector.
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
@@ -129,15 +129,15 @@ spec:
type:
description: |-
The type of the pipeline, used to differentiate between
- filter-weigher and descheduler pipelines within the same
+ filter-weigher and detector pipelines within the same
scheduling domain.
If the type is filter-weigher, the filter and weigher attributes
- must be set. If the type is descheduler, the detectors attribute
+ must be set. If the type is detector, the detectors attribute
must be set.
enum:
- filter-weigher
- - descheduler
+ - detector
type: string
weighers:
description: |-
@@ -236,6 +236,213 @@ spec:
- type
type: object
type: array
+ detectors:
+ description: List of statuses for each detector in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the detector.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the detector.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: List of statuses for each filter in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the filter.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the filter.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ weighers:
+ description: List of statuses for each weigher in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the weigher.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the weigher.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
type: object
required:
- spec
diff --git a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
index b835b13c0..2f68972d1 100644
--- a/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_pipelines.yaml
@@ -75,7 +75,7 @@ spec:
description: |-
Ordered list of detectors to apply in a descheduling pipeline.
- This attribute is set only if the pipeline type is descheduler.
+ This attribute is set only if the pipeline type is detector.
Detectors find candidates for descheduling (migration off current host).
These detectors are run after weighers are applied.
items:
@@ -135,15 +135,15 @@ spec:
type:
description: |-
The type of the pipeline, used to differentiate between
- filter-weigher and descheduler pipelines within the same
+ filter-weigher and detector pipelines within the same
scheduling domain.
If the type is filter-weigher, the filter and weigher attributes
- must be set. If the type is descheduler, the detectors attribute
+ must be set. If the type is detector, the detectors attribute
must be set.
enum:
- filter-weigher
- - descheduler
+ - detector
type: string
weighers:
description: |-
@@ -242,6 +242,213 @@ spec:
- type
type: object
type: array
+ detectors:
+ description: List of statuses for each detector in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the detector.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the detector.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ filters:
+ description: List of statuses for each filter in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the filter.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the filter.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ weighers:
+ description: List of statuses for each weigher in the pipeline.
+ items:
+ properties:
+ conditions:
+ description: The current status conditions of the weigher.
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ name:
+ description: The name of the weigher.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
type: object
required:
- spec
diff --git a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
index 3d9f2b694..89214ae63 100644
--- a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml
@@ -130,7 +130,7 @@ spec:
This pipeline runs steps that select virtual machines to deschedule from
compute hosts in order to optimize resource usage and performance.
This is the pipeline used for KVM hypervisors (qemu and cloud-hypervisor).
- type: descheduler
+ type: detector
createDecisions: true
detectors:
- name: avoid_high_steal_pct
diff --git a/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go b/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
index d5389ae38..62ac788a9 100644
--- a/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/cinder/filter_weigher_pipeline_controller_test.go
@@ -179,7 +179,7 @@ func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
Weighers: []v1alpha1.WeigherSpec{},
},
})
- if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ if len(initResult.FilterErrors) > 0 || len(initResult.WeigherErrors) > 0 {
t.Fatalf("Failed to init pipeline: %v", initResult)
}
controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
@@ -411,7 +411,7 @@ func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T)
if tt.pipelineConfig != nil {
controller.PipelineConfigs[tt.pipelineConfig.Name] = *tt.pipelineConfig
initResult := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
- if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ if len(initResult.FilterErrors) > 0 || len(initResult.WeigherErrors) > 0 {
t.Fatalf("Failed to init pipeline: %v", initResult)
}
controller.Pipelines[tt.pipelineConfig.Name] = initResult.Pipeline
@@ -524,18 +524,18 @@ func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectCriticalError && initResult.CriticalErr == nil {
+ if tt.expectCriticalError && len(initResult.FilterErrors) == 0 {
t.Error("Expected error but got none")
}
- if !tt.expectCriticalError && initResult.CriticalErr != nil {
- t.Errorf("Expected no error but got: %v", initResult.CriticalErr)
+ if !tt.expectCriticalError && len(initResult.FilterErrors) > 0 {
+ t.Errorf("Expected no error but got: %v", initResult.FilterErrors)
}
- if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ if tt.expectNonCriticalError && len(initResult.WeigherErrors) == 0 {
t.Error("Expected non-critical error but got none")
}
- if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
- t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
+ if !tt.expectNonCriticalError && len(initResult.WeigherErrors) > 0 {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.WeigherErrors)
}
})
}
diff --git a/internal/scheduling/lib/detector_pipeline.go b/internal/scheduling/lib/detector_pipeline.go
index e7a6c9758..a029776fc 100644
--- a/internal/scheduling/lib/detector_pipeline.go
+++ b/internal/scheduling/lib/detector_pipeline.go
@@ -34,27 +34,28 @@ func (p *DetectorPipeline[DetectionType]) Init(
ctx context.Context,
confedSteps []v1alpha1.DetectorSpec,
supportedSteps map[string]Detector[DetectionType],
-) (nonCriticalErr, criticalErr error) {
+) (detectorErrs map[string]error) {
p.order = []string{}
// Load all steps from the configuration.
p.steps = make(map[string]Detector[DetectionType], len(confedSteps))
+ detectorErrs = make(map[string]error)
for _, stepConf := range confedSteps {
step, ok := supportedSteps[stepConf.Name]
if !ok {
- nonCriticalErr = errors.New("descheduler: unsupported step name: " + stepConf.Name)
- continue // Descheduler steps are optional.
+ detectorErrs[stepConf.Name] = errors.New("descheduler: unsupported step name: " + stepConf.Name)
+ continue
}
step = monitorDetector(step, stepConf, p.Monitor)
if err := step.Init(ctx, p.Client, stepConf); err != nil {
- nonCriticalErr = errors.New("descheduler: failed to initialize step " + stepConf.Name + ": " + err.Error())
- continue // Descheduler steps are optional.
+ detectorErrs[stepConf.Name] = errors.New("descheduler: failed to initialize step " + stepConf.Name + ": " + err.Error())
+ continue
}
p.steps[stepConf.Name] = step
p.order = append(p.order, stepConf.Name)
slog.Info("descheduler: added step", "name", stepConf.Name)
}
- return nonCriticalErr, nil // At the moment, there are no critical errors.
+ return detectorErrs
}
// Execute the descheduler steps in parallel and collect the decisions made by
diff --git a/internal/scheduling/lib/detector_pipeline_test.go b/internal/scheduling/lib/detector_pipeline_test.go
index 79f39db53..4d1857f1f 100644
--- a/internal/scheduling/lib/detector_pipeline_test.go
+++ b/internal/scheduling/lib/detector_pipeline_test.go
@@ -36,7 +36,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
confedSteps []v1alpha1.DetectorSpec
supportedSteps map[string]Detector[mockDetection]
expectNonCritical bool
- expectCritical bool
expectedStepsCount int
}{
{
@@ -48,7 +47,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
"step1": &mockDetectorStep{},
},
expectNonCritical: false,
- expectCritical: false,
expectedStepsCount: 1,
},
{
@@ -62,7 +60,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
"step2": &mockDetectorStep{},
},
expectNonCritical: false,
- expectCritical: false,
expectedStepsCount: 2,
},
{
@@ -72,7 +69,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
},
supportedSteps: map[string]Detector[mockDetection]{},
expectNonCritical: true,
- expectCritical: false,
expectedStepsCount: 0,
},
{
@@ -84,7 +80,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
"failing_step": &mockDetectorStep{initErr: errors.New("init failed")},
},
expectNonCritical: true,
- expectCritical: false,
expectedStepsCount: 0,
},
{
@@ -92,7 +87,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
confedSteps: []v1alpha1.DetectorSpec{},
supportedSteps: map[string]Detector[mockDetection]{},
expectNonCritical: false,
- expectCritical: false,
expectedStepsCount: 0,
},
{
@@ -105,7 +99,6 @@ func TestDetectorPipeline_Init(t *testing.T) {
"valid_step": &mockDetectorStep{},
},
expectNonCritical: true,
- expectCritical: false,
expectedStepsCount: 1,
},
}
@@ -118,23 +111,17 @@ func TestDetectorPipeline_Init(t *testing.T) {
Monitor: DetectorPipelineMonitor{},
}
- nonCriticalErr, criticalErr := pipeline.Init(
+ errs := pipeline.Init(
context.Background(),
tt.confedSteps,
tt.supportedSteps,
)
- if tt.expectNonCritical && nonCriticalErr == nil {
- t.Error("expected non-critical error, got nil")
+ if tt.expectNonCritical && len(errs) == 0 {
+ t.Error("expected non-critical errors, got none")
}
- if !tt.expectNonCritical && nonCriticalErr != nil {
- t.Errorf("expected no non-critical error, got %v", nonCriticalErr)
- }
- if tt.expectCritical && criticalErr == nil {
- t.Error("expected critical error, got nil")
- }
- if !tt.expectCritical && criticalErr != nil {
- t.Errorf("expected no critical error, got %v", criticalErr)
+ if !tt.expectNonCritical && len(errs) > 0 {
+ t.Errorf("did not expect non-critical errors, got: %v", errs)
}
if len(pipeline.steps) != tt.expectedStepsCount {
t.Errorf("expected %d steps, got %d", tt.expectedStepsCount, len(pipeline.steps))
diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go
index 6ddcbc537..76c306d86 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline.go
@@ -55,34 +55,26 @@ func InitNewFilterWeigherPipeline[RequestType FilterWeigherPipelineRequest](
pipelineMonitor := monitor.SubPipeline(name)
- // Ensure there are no overlaps between filter and weigher names.
- for filterName := range supportedFilters {
- if _, ok := supportedWeighers[filterName]; ok {
- return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
- CriticalErr: errors.New("step name overlap between filters and weighers: " + filterName),
- }
- }
- }
-
// Load all filters from the configuration.
filtersByName := make(map[string]Filter[RequestType], len(confedFilters))
filtersOrder := []string{}
+ filterErrors := make(map[string]error)
for _, filterConfig := range confedFilters {
slog.Info("scheduler: configuring filter", "name", filterConfig.Name)
slog.Info("supported:", "filters", maps.Keys(supportedFilters))
makeFilter, ok := supportedFilters[filterConfig.Name]
if !ok {
- return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
- CriticalErr: errors.New("unsupported filter name: " + filterConfig.Name),
- }
+ slog.Error("scheduler: unsupported filter", "name", filterConfig.Name)
+ filterErrors[filterConfig.Name] = errors.New("unsupported filter name: " + filterConfig.Name)
+ continue
}
filter := makeFilter()
- filter = monitorFilter(filter, filterConfig.Name, pipelineMonitor)
filter = validateFilter(filter)
+ filter = monitorFilter(filter, filterConfig.Name, pipelineMonitor)
if err := filter.Init(ctx, client, filterConfig); err != nil {
- return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
- CriticalErr: errors.New("failed to initialize filter: " + err.Error()),
- }
+ slog.Error("scheduler: failed to initialize filter", "name", filterConfig.Name, "error", err)
+ filterErrors[filterConfig.Name] = errors.New("failed to initialize filter: " + err.Error())
+ continue
}
filtersByName[filterConfig.Name] = filter
filtersOrder = append(filtersOrder, filterConfig.Name)
@@ -93,22 +85,24 @@ func InitNewFilterWeigherPipeline[RequestType FilterWeigherPipelineRequest](
weighersByName := make(map[string]Weigher[RequestType], len(confedWeighers))
weighersMultipliers := make(map[string]float64, len(confedWeighers))
weighersOrder := []string{}
- var nonCriticalErr error
+ weigherErrors := make(map[string]error)
for _, weigherConfig := range confedWeighers {
slog.Info("scheduler: configuring weigher", "name", weigherConfig.Name)
slog.Info("supported:", "weighers", maps.Keys(supportedWeighers))
makeWeigher, ok := supportedWeighers[weigherConfig.Name]
if !ok {
- nonCriticalErr = errors.New("unsupported weigher name: " + weigherConfig.Name)
- continue // Weighers are optional.
+ slog.Error("scheduler: unsupported weigher", "name", weigherConfig.Name)
+ weigherErrors[weigherConfig.Name] = errors.New("unsupported weigher name: " + weigherConfig.Name)
+ continue
}
weigher := makeWeigher()
// Validate that the weigher doesn't unexpectedly filter out hosts.
weigher = validateWeigher(weigher)
weigher = monitorWeigher(weigher, weigherConfig.Name, pipelineMonitor)
if err := weigher.Init(ctx, client, weigherConfig); err != nil {
- nonCriticalErr = errors.New("failed to initialize weigher: " + err.Error())
- continue // Weighers are optional.
+ slog.Error("scheduler: failed to initialize weigher", "name", weigherConfig.Name, "error", err)
+ weigherErrors[weigherConfig.Name] = errors.New("failed to initialize weigher: " + err.Error())
+ continue
}
weighersByName[weigherConfig.Name] = weigher
weighersOrder = append(weighersOrder, weigherConfig.Name)
@@ -121,7 +115,8 @@ func InitNewFilterWeigherPipeline[RequestType FilterWeigherPipelineRequest](
}
return PipelineInitResult[FilterWeigherPipeline[RequestType]]{
- NonCriticalErr: nonCriticalErr,
+ FilterErrors: filterErrors,
+ WeigherErrors: weigherErrors,
Pipeline: &filterWeigherPipeline[RequestType]{
filtersOrder: filtersOrder,
filters: filtersByName,
diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go
index 726be0fa9..aa80cbc9d 100644
--- a/internal/scheduling/lib/filter_weigher_pipeline_test.go
+++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go
@@ -284,8 +284,11 @@ func TestInitNewFilterWeigherPipeline_Success(t *testing.T) {
monitor,
)
- if result.CriticalErr != nil {
- t.Fatalf("expected no critical error, got %v", result.CriticalErr)
+ if len(result.FilterErrors) != 0 {
+ t.Fatalf("expected no filter error, got %v", result.FilterErrors)
+ }
+ if len(result.WeigherErrors) != 0 {
+ t.Fatalf("expected no weigher error, got %v", result.WeigherErrors)
}
if result.Pipeline == nil {
t.Fatal("expected pipeline, got nil")
@@ -323,47 +326,11 @@ func TestInitNewFilterWeigherPipeline_UnsupportedFilter(t *testing.T) {
monitor,
)
- if result.CriticalErr == nil {
+ if result.FilterErrors["unsupported-filter"] == nil {
t.Fatal("expected critical error for unsupported filter, got nil")
}
}
-func TestInitNewFilterWeigherPipeline_NameOverlap(t *testing.T) {
- scheme := runtime.NewScheme()
- cl := fake.NewClientBuilder().WithScheme(scheme).Build()
-
- // Create filter and weigher with same name
- supportedFilters := map[string]func() Filter[mockFilterWeigherPipelineRequest]{
- "duplicate-name": func() Filter[mockFilterWeigherPipelineRequest] {
- return &mockFilter[mockFilterWeigherPipelineRequest]{}
- },
- }
- supportedWeighers := map[string]func() Weigher[mockFilterWeigherPipelineRequest]{
- "duplicate-name": func() Weigher[mockFilterWeigherPipelineRequest] {
- return &mockWeigher[mockFilterWeigherPipelineRequest]{}
- },
- }
-
- monitor := FilterWeigherPipelineMonitor{
- PipelineName: "test-pipeline",
- }
-
- result := InitNewFilterWeigherPipeline(
- t.Context(),
- cl,
- "test-pipeline",
- supportedFilters,
- nil,
- supportedWeighers,
- nil,
- monitor,
- )
-
- if result.CriticalErr == nil {
- t.Fatal("expected critical error for name overlap, got nil")
- }
-}
-
func TestInitNewFilterWeigherPipeline_UnsupportedWeigher(t *testing.T) {
scheme := runtime.NewScheme()
cl := fake.NewClientBuilder().WithScheme(scheme).Build()
@@ -395,12 +362,11 @@ func TestInitNewFilterWeigherPipeline_UnsupportedWeigher(t *testing.T) {
monitor,
)
- // Unsupported weigher should result in non-critical error
- if result.NonCriticalErr == nil {
- t.Fatal("expected non-critical error for unsupported weigher, got nil")
+ if result.WeigherErrors["unsupported-weigher"] == nil {
+ t.Fatal("expected error for unsupported weigher, got nil")
}
- if result.CriticalErr != nil {
- t.Fatalf("expected no critical error, got %v", result.CriticalErr)
+ if len(result.FilterErrors) != 0 {
+ t.Fatalf("expected no filter error, got %v", result.FilterErrors)
}
}
diff --git a/internal/scheduling/lib/pipeline_controller.go b/internal/scheduling/lib/pipeline_controller.go
index 972a0564b..895e5e192 100644
--- a/internal/scheduling/lib/pipeline_controller.go
+++ b/internal/scheduling/lib/pipeline_controller.go
@@ -5,6 +5,7 @@ package lib
import (
"context"
+ "errors"
"fmt"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
@@ -73,20 +74,84 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
initResult := c.Initializer.InitPipeline(ctx, *obj)
+ obj.Status.Filters = []v1alpha1.FilterStatus{}
+ for _, filter := range obj.Spec.Filters {
+ fs := v1alpha1.FilterStatus{Name: filter.Name}
+ if err, ok := initResult.FilterErrors[filter.Name]; ok {
+ meta.SetStatusCondition(&fs.Conditions, metav1.Condition{
+ Type: v1alpha1.FilterConditionReady,
+ Status: metav1.ConditionFalse,
+ Reason: "FilterInitFailed",
+ Message: err.Error(),
+ })
+ } else {
+ meta.SetStatusCondition(&fs.Conditions, metav1.Condition{
+ Type: v1alpha1.FilterConditionReady,
+ Status: metav1.ConditionTrue,
+ Reason: "FilterReady",
+ Message: "filter is ready",
+ })
+ }
+ obj.Status.Filters = append(obj.Status.Filters, fs)
+ }
+
+ obj.Status.Weighers = []v1alpha1.WeigherStatus{}
+ for _, weigher := range obj.Spec.Weighers {
+ ws := v1alpha1.WeigherStatus{Name: weigher.Name}
+ if err, ok := initResult.WeigherErrors[weigher.Name]; ok {
+ meta.SetStatusCondition(&ws.Conditions, metav1.Condition{
+ Type: v1alpha1.WeigherConditionReady,
+ Status: metav1.ConditionFalse,
+ Reason: "WeigherInitFailed",
+ Message: err.Error(),
+ })
+ } else {
+ meta.SetStatusCondition(&ws.Conditions, metav1.Condition{
+ Type: v1alpha1.WeigherConditionReady,
+ Status: metav1.ConditionTrue,
+ Reason: "WeigherReady",
+ Message: "weigher is ready",
+ })
+ }
+ obj.Status.Weighers = append(obj.Status.Weighers, ws)
+ }
+
+ obj.Status.Detectors = []v1alpha1.DetectorStatus{}
+ for _, detector := range obj.Spec.Detectors {
+ ds := v1alpha1.DetectorStatus{Name: detector.Name}
+ if err, ok := initResult.DetectorErrors[detector.Name]; ok {
+ meta.SetStatusCondition(&ds.Conditions, metav1.Condition{
+ Type: v1alpha1.DetectorConditionReady,
+ Status: metav1.ConditionFalse,
+ Reason: "DetectorInitFailed",
+ Message: err.Error(),
+ })
+ } else {
+ meta.SetStatusCondition(&ds.Conditions, metav1.Condition{
+ Type: v1alpha1.DetectorConditionReady,
+ Status: metav1.ConditionTrue,
+ Reason: "DetectorReady",
+ Message: "detector is ready",
+ })
+ }
+ obj.Status.Detectors = append(obj.Status.Detectors, ds)
+ }
+
// If there was a critical error, the pipeline cannot be used.
- if initResult.CriticalErr != nil {
- log.Error(initResult.CriticalErr, "failed to create pipeline", "pipelineName", obj.Name)
+ if len(initResult.FilterErrors) > 0 {
+ err := errors.New("one or more filters failed to initialize")
+ log.Error(err, "failed to create pipeline", "pipelineName", obj.Name)
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
Type: v1alpha1.PipelineConditionReady,
Status: metav1.ConditionFalse,
Reason: "PipelineInitFailed",
- Message: initResult.CriticalErr.Error(),
+ Message: err.Error(),
})
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
Type: v1alpha1.PipelineConditionAllStepsReady,
Status: metav1.ConditionFalse,
Reason: "PipelineInitFailed",
- Message: initResult.CriticalErr.Error(),
+ Message: err.Error(),
})
patch := client.MergeFrom(old)
if err := c.Status().Patch(ctx, obj, patch); err != nil {
@@ -99,13 +164,14 @@ func (c *BasePipelineController[PipelineType]) handlePipelineChange(
// If there was a non-critical error, continue running the pipeline but
// report the error in the pipeline status.
- if initResult.NonCriticalErr != nil {
- log.Error(initResult.NonCriticalErr, "non-critical error during pipeline initialization", "pipelineName", obj.Name)
+ if len(initResult.WeigherErrors) > 0 || len(initResult.DetectorErrors) > 0 {
+ err := errors.New("one or more weighers or detectors failed to initialize")
+ log.Error(err, "non-critical error during pipeline initialization", "pipelineName", obj.Name)
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
Type: v1alpha1.PipelineConditionAllStepsReady,
Status: metav1.ConditionFalse,
Reason: "SomeStepsNotReady",
- Message: initResult.NonCriticalErr.Error(),
+ Message: err.Error(),
})
} else {
meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
diff --git a/internal/scheduling/lib/pipeline_controller_test.go b/internal/scheduling/lib/pipeline_controller_test.go
index ffe20d13f..9166288fe 100644
--- a/internal/scheduling/lib/pipeline_controller_test.go
+++ b/internal/scheduling/lib/pipeline_controller_test.go
@@ -5,6 +5,7 @@ package lib
import (
"context"
+ "errors"
"testing"
"k8s.io/apimachinery/pkg/api/meta"
@@ -90,7 +91,7 @@ func TestBasePipelineController_InitAllPipelines(t *testing.T) {
},
Spec: v1alpha1.PipelineSpec{
SchedulingDomain: v1alpha1.SchedulingDomainNova,
- Type: v1alpha1.PipelineTypeDescheduler,
+ Type: v1alpha1.PipelineTypeDetector,
Filters: []v1alpha1.FilterSpec{},
Weighers: []v1alpha1.WeigherSpec{},
},
@@ -265,7 +266,11 @@ func TestBasePipelineController_handlePipelineChange(t *testing.T) {
if tt.initPipelineError {
initializer.initPipelineFunc = func(ctx context.Context, p v1alpha1.Pipeline) PipelineInitResult[mockPipeline] {
- return PipelineInitResult[mockPipeline]{CriticalErr: context.Canceled}
+ return PipelineInitResult[mockPipeline]{
+ FilterErrors: map[string]error{
+ "test-filter": errors.New("failed to init filter"),
+ },
+ }
}
}
diff --git a/internal/scheduling/lib/pipeline_initializer.go b/internal/scheduling/lib/pipeline_initializer.go
index 1b0ca6f3b..b51d72e4e 100644
--- a/internal/scheduling/lib/pipeline_initializer.go
+++ b/internal/scheduling/lib/pipeline_initializer.go
@@ -14,15 +14,12 @@ type PipelineInitResult[PipelineType any] struct {
// The pipeline, if successfully created.
Pipeline PipelineType
- // A critical error that prevented the pipeline from being initialized.
- // If a critical error occurs, the pipeline should not be used.
- CriticalErr error
-
- // A non-critical error that occurred during initialization.
- // If a non-critical error occurs, the pipeline may still be used.
- // However, the error should be reported in the pipeline status
- // so we can debug potential issues.
- NonCriticalErr error
+ // Errors for filters, if any, by their name.
+ FilterErrors map[string]error
+ // Errors for weighers, if any, by their name.
+ WeigherErrors map[string]error
+ // Errors for detectors, if any, by their name.
+ DetectorErrors map[string]error
}
// The base pipeline controller will delegate some methods to the parent
diff --git a/internal/scheduling/machines/filter_weigher_pipeline_controller_test.go b/internal/scheduling/machines/filter_weigher_pipeline_controller_test.go
index fa207f8eb..7df66091b 100644
--- a/internal/scheduling/machines/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/machines/filter_weigher_pipeline_controller_test.go
@@ -255,17 +255,17 @@ func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectCriticalError && initResult.CriticalErr == nil {
+ if tt.expectCriticalError && len(initResult.FilterErrors) == 0 {
t.Error("Expected critical error but got none")
}
- if !tt.expectCriticalError && initResult.CriticalErr != nil {
- t.Errorf("Expected no critical error but got: %v", initResult.CriticalErr)
+ if !tt.expectCriticalError && len(initResult.FilterErrors) > 0 {
+ t.Errorf("Expected no critical error but got: %v", initResult.FilterErrors)
}
- if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ if tt.expectNonCriticalError && len(initResult.WeigherErrors) == 0 {
t.Error("Expected non-critical error but got none")
}
- if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
- t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
+ if !tt.expectNonCriticalError && len(initResult.WeigherErrors) > 0 {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.WeigherErrors)
}
})
}
diff --git a/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
index 1739ae362..33f10888f 100644
--- a/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/manila/filter_weigher_pipeline_controller_test.go
@@ -408,7 +408,7 @@ func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T)
if tt.pipelineConfig != nil {
controller.PipelineConfigs[tt.pipelineConfig.Name] = *tt.pipelineConfig
initResult := controller.InitPipeline(t.Context(), *tt.pipelineConfig)
- if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
+ if len(initResult.FilterErrors) > 0 || len(initResult.WeigherErrors) > 0 {
t.Fatalf("Failed to init pipeline: %v", initResult)
}
controller.Pipelines[tt.pipelineConfig.Name] = initResult.Pipeline
@@ -575,18 +575,18 @@ func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectCriticalError && initResult.CriticalErr == nil {
- t.Error("Expected error but got none")
+ if !tt.expectCriticalError && len(initResult.FilterErrors) > 0 {
+ t.Errorf("Expected no critical error but got: %v", initResult.FilterErrors)
}
- if !tt.expectCriticalError && initResult.CriticalErr != nil {
- t.Errorf("Expected no error but got: %v", initResult.CriticalErr)
+ if tt.expectCriticalError && len(initResult.FilterErrors) == 0 {
+ t.Error("Expected critical error but got none")
}
- if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
- t.Error("Expected non-critical error but got none")
+ if !tt.expectNonCriticalError && len(initResult.WeigherErrors) > 0 {
+ t.Errorf("Expected no non-critical error but got: %v", initResult.WeigherErrors)
}
- if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
- t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
+ if tt.expectNonCriticalError && len(initResult.WeigherErrors) == 0 {
+ t.Error("Expected non-critical error but got none")
}
})
}
diff --git a/internal/scheduling/nova/detector_pipeline_controller.go b/internal/scheduling/nova/detector_pipeline_controller.go
index 5b13e62f5..7bd773245 100644
--- a/internal/scheduling/nova/detector_pipeline_controller.go
+++ b/internal/scheduling/nova/detector_pipeline_controller.go
@@ -43,7 +43,7 @@ type DetectorPipelineController struct {
// The type of pipeline this controller manages.
func (c *DetectorPipelineController) PipelineType() v1alpha1.PipelineType {
- return v1alpha1.PipelineTypeDescheduler
+ return v1alpha1.PipelineTypeDetector
}
// The base controller will delegate the pipeline creation down to this method.
@@ -57,11 +57,10 @@ func (c *DetectorPipelineController) InitPipeline(
DetectorCycleBreaker: c.DetectorCycleBreaker,
Monitor: c.Monitor.SubPipeline(p.Name),
}
- nonCriticalErr, criticalErr := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
+ errs := pipeline.Init(ctx, p.Spec.Detectors, supportedDetectors)
return lib.PipelineInitResult[*lib.DetectorPipeline[plugins.VMDetection]]{
Pipeline: pipeline,
- NonCriticalErr: nonCriticalErr,
- CriticalErr: criticalErr,
+ DetectorErrors: errs,
}
}
diff --git a/internal/scheduling/nova/detector_pipeline_controller_test.go b/internal/scheduling/nova/detector_pipeline_controller_test.go
index 0aa6952ea..29df28631 100644
--- a/internal/scheduling/nova/detector_pipeline_controller_test.go
+++ b/internal/scheduling/nova/detector_pipeline_controller_test.go
@@ -42,7 +42,6 @@ func TestDetectorPipelineController_InitPipeline(t *testing.T) {
name string
steps []v1alpha1.DetectorSpec
expectNonCriticalError bool
- expectCriticalError bool
}{
{
name: "successful pipeline initialization",
@@ -52,7 +51,6 @@ func TestDetectorPipelineController_InitPipeline(t *testing.T) {
},
},
expectNonCriticalError: false,
- expectCriticalError: false,
},
{
name: "unsupported step",
@@ -62,13 +60,11 @@ func TestDetectorPipelineController_InitPipeline(t *testing.T) {
},
},
expectNonCriticalError: true,
- expectCriticalError: false,
},
{
name: "empty steps",
steps: []v1alpha1.DetectorSpec{},
expectNonCriticalError: false,
- expectCriticalError: false,
},
}
@@ -83,27 +79,17 @@ func TestDetectorPipelineController_InitPipeline(t *testing.T) {
DetectorCycleBreaker: controller.DetectorCycleBreaker,
Monitor: controller.Monitor,
}
- nonCriticalErr, criticalErr := pipeline.Init(t.Context(), tt.steps, map[string]lib.Detector[plugins.VMDetection]{
+ errs := pipeline.Init(t.Context(), tt.steps, map[string]lib.Detector[plugins.VMDetection]{
"mock-step": &mockControllerStep{},
})
- if tt.expectCriticalError {
- if criticalErr == nil {
- t.Errorf("expected critical error, got none")
- }
- } else {
- if criticalErr != nil {
- t.Errorf("unexpected critical error: %v", criticalErr)
- }
- }
-
if tt.expectNonCriticalError {
- if nonCriticalErr == nil {
+ if len(errs) == 0 {
t.Errorf("expected non-critical error, got none")
}
} else {
- if nonCriticalErr != nil {
- t.Errorf("unexpected non-critical error: %v", nonCriticalErr)
+ if len(errs) > 0 {
+ t.Errorf("unexpected non-critical error: %v", errs)
}
}
diff --git a/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go b/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
index 5bd41a092..7db3faec3 100644
--- a/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/nova/filter_weigher_pipeline_controller_test.go
@@ -213,8 +213,8 @@ func TestFilterWeigherPipelineController_Reconcile(t *testing.T) {
},
Spec: tt.pipeline.Spec,
})
- if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
- t.Fatalf("Failed to init pipeline: %v", err)
+ if len(initResult.FilterErrors) > 0 || len(initResult.WeigherErrors) > 0 {
+ t.Fatalf("Failed to initialize pipeline: filter errors: %v, weigher errors: %v", initResult.FilterErrors, initResult.WeigherErrors)
}
controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
@@ -336,17 +336,17 @@ func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectCriticalError && initResult.CriticalErr == nil {
+ if tt.expectCriticalError && len(initResult.FilterErrors) == 0 {
t.Error("Expected critical error but got none")
}
- if !tt.expectCriticalError && initResult.CriticalErr != nil {
- t.Errorf("Expected no critical error but got: %v", initResult.CriticalErr)
+ if !tt.expectCriticalError && len(initResult.FilterErrors) > 0 {
+ t.Errorf("Unexpected critical errors: %v", initResult.FilterErrors)
}
- if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ if tt.expectNonCriticalError && len(initResult.WeigherErrors) == 0 {
t.Error("Expected non-critical error but got none")
}
- if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
- t.Errorf("Expected no non-critical error but got: %v", initResult.NonCriticalErr)
+ if !tt.expectNonCriticalError && len(initResult.WeigherErrors) > 0 {
+ t.Errorf("Unexpected non-critical errors: %v", initResult.WeigherErrors)
}
})
}
@@ -687,8 +687,8 @@ func TestFilterWeigherPipelineController_ProcessNewDecisionFromAPI(t *testing.T)
},
Spec: tt.pipeline.Spec,
})
- if initResult.CriticalErr != nil || initResult.NonCriticalErr != nil {
- t.Fatalf("Failed to init pipeline: %v", initResult)
+ if len(initResult.FilterErrors) > 0 || len(initResult.WeigherErrors) > 0 {
+ t.Fatalf("Failed to initialize pipeline: filter errors: %v, weigher errors: %v", initResult.FilterErrors, initResult.WeigherErrors)
}
controller.Pipelines[tt.pipeline.Name] = initResult.Pipeline
}
diff --git a/internal/scheduling/pods/filter_weigher_pipeline_controller_test.go b/internal/scheduling/pods/filter_weigher_pipeline_controller_test.go
index 01529e12b..9e4fffed1 100644
--- a/internal/scheduling/pods/filter_weigher_pipeline_controller_test.go
+++ b/internal/scheduling/pods/filter_weigher_pipeline_controller_test.go
@@ -232,20 +232,18 @@ func TestFilterWeigherPipelineController_InitPipeline(t *testing.T) {
},
})
- if tt.expectCriticalError && initResult.CriticalErr == nil {
+ if tt.expectCriticalError && len(initResult.FilterErrors) == 0 {
t.Error("expected critical error but got none")
}
-
- if !tt.expectCriticalError && initResult.CriticalErr != nil {
- t.Errorf("expected no critical error, got: %v", initResult.CriticalErr)
+ if !tt.expectCriticalError && len(initResult.FilterErrors) > 0 {
+ t.Errorf("unexpected critical error: %v", initResult.FilterErrors)
}
- if tt.expectNonCriticalError && initResult.NonCriticalErr == nil {
+ if tt.expectNonCriticalError && len(initResult.WeigherErrors) == 0 {
t.Error("expected non-critical error but got none")
}
-
- if !tt.expectNonCriticalError && initResult.NonCriticalErr != nil {
- t.Errorf("expected no non-critical error, got: %v", initResult.NonCriticalErr)
+ if !tt.expectNonCriticalError && len(initResult.WeigherErrors) > 0 {
+ t.Errorf("unexpected non-critical error: %v", initResult.WeigherErrors)
}
})
}