-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathcreation.go
More file actions
398 lines (350 loc) · 17.2 KB
/
creation.go
File metadata and controls
398 lines (350 loc) · 17.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
package cluster
import (
"context"
"time"
"github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" //nolint:staticcheck // dot import for test readability
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi"
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client"
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/helper"
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/labels"
)
// Ginkgo spec: baseline (Tier0) lifecycle coverage for the Cluster resource type.
//
// A fresh cluster is created in BeforeEach and torn down in AfterEach, so each
// It block runs against its own cluster and the specs stay independent. The
// suite covers three angles:
//  1. end-to-end workflow from creation to Ready, observed via API conditions;
//  2. direct Kubernetes resource verification for adapters that create K8s
//     objects (namespace, job, deployment);
//  3. dependency ordering between adapters (cl-deployment waits on cl-job).
var _ = ginkgo.Describe("[Suite: cluster][baseline] Cluster Resource Type Lifecycle",
	ginkgo.Label(labels.Tier0),
	func() {
		// Shared per-spec state; populated in BeforeEach, consumed by each It
		// and by AfterEach for cleanup.
		var h *helper.Helper
		var clusterID string
		var clusterName string
		ginkgo.BeforeEach(func(ctx context.Context) {
			h = helper.New()
			// Create cluster for all tests in this suite.
			// The request body comes from a static JSON payload under testdata.
			cluster, err := h.Client.CreateClusterFromPayload(ctx, h.TestDataPath("payloads/clusters/cluster-request.json"))
			Expect(err).NotTo(HaveOccurred(), "failed to create cluster")
			Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated")
			Expect(cluster.Name).NotTo(BeEmpty(), "cluster name should be present")
			// Id is a pointer in the generated API model; safe to dereference
			// after the nil check above.
			clusterID = *cluster.Id
			clusterName = cluster.Name
			ginkgo.GinkgoWriter.Printf("Created cluster ID: %s, Name: %s\n", clusterID, clusterName)
		})
		ginkgo.Describe("Basic Workflow Validation", ginkgo.Label(labels.Tier0), func() {
			// This test validates the end-to-end cluster lifecycle workflow:
			// 1. Cluster creation via API with initial condition validation
			// 2. Required adapter execution with comprehensive metadata validation
			// 3. Final cluster state verification (Ready and Available conditions)
			ginkgo.It("should validate complete workflow from creation to Ready state",
				func(ctx context.Context) {
					ginkgo.By("Verify initial status of cluster")
					// Verify initial conditions are False, indicating workflow has not completed yet.
					// This ensures the cluster starts in the correct initial state.
					// NOTE(review): this read races with adapter progress — it assumes the
					// workflow cannot reach Ready between BeforeEach and this call; confirm
					// that adapter processing is slow enough for this to hold in CI.
					cluster, err := h.Client.GetCluster(ctx, clusterID)
					Expect(err).NotTo(HaveOccurred(), "failed to get cluster")
					Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present")
					hasReadyFalse := h.HasResourceCondition(cluster.Status.Conditions,
						client.ConditionTypeReady, openapi.ResourceConditionStatusFalse)
					Expect(hasReadyFalse).To(BeTrue(),
						"initial cluster conditions should have Ready=False")
					hasAvailableFalse := h.HasResourceCondition(cluster.Status.Conditions,
						client.ConditionTypeAvailable, openapi.ResourceConditionStatusFalse)
					Expect(hasAvailableFalse).To(BeTrue(),
						"initial cluster conditions should have Available=False")
					ginkgo.By("Verify required adapter execution results")
					// Validate required adapters from config have completed successfully.
					// If an adapter fails, we can identify which specific adapter failed.
					// Eventually re-runs the whole closure until every assertion passes
					// or the configured adapter-processing timeout elapses.
					Eventually(func(g Gomega) {
						statuses, err := h.Client.GetClusterStatuses(ctx, clusterID)
						g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses")
						g.Expect(statuses.Items).NotTo(BeEmpty(), "at least one adapter should have executed")
						// Build a map of adapter statuses for easy lookup
						adapterMap := make(map[string]openapi.AdapterStatus)
						for _, adapter := range statuses.Items {
							adapterMap[adapter.Adapter] = adapter
						}
						// Validate each required adapter from config
						for _, requiredAdapter := range h.Cfg.Adapters.Cluster {
							adapter, exists := adapterMap[requiredAdapter]
							g.Expect(exists).To(BeTrue(),
								"required adapter %s should be present in adapter statuses", requiredAdapter)
							// Validate adapter-level metadata
							g.Expect(adapter.CreatedTime).NotTo(BeZero(),
								"adapter %s should have valid created_time", adapter.Adapter)
							g.Expect(adapter.LastReportTime).NotTo(BeZero(),
								"adapter %s should have valid last_report_time", adapter.Adapter)
							// Generation 1 is expected because this is a freshly created
							// cluster with no subsequent spec updates.
							g.Expect(adapter.ObservedGeneration).To(Equal(int32(1)),
								"adapter %s should have observed_generation=1 for new creation request", adapter.Adapter)
							hasApplied := h.HasAdapterCondition(
								adapter.Conditions,
								client.ConditionTypeApplied,
								openapi.AdapterConditionStatusTrue,
							)
							g.Expect(hasApplied).To(BeTrue(),
								"adapter %s should have Applied=True", adapter.Adapter)
							hasAvailable := h.HasAdapterCondition(
								adapter.Conditions,
								client.ConditionTypeAvailable,
								openapi.AdapterConditionStatusTrue,
							)
							g.Expect(hasAvailable).To(BeTrue(),
								"adapter %s should have Available=True", adapter.Adapter)
							hasHealth := h.HasAdapterCondition(
								adapter.Conditions,
								client.ConditionTypeHealth,
								openapi.AdapterConditionStatusTrue,
							)
							g.Expect(hasHealth).To(BeTrue(),
								"adapter %s should have Health=True", adapter.Adapter)
							// Validate condition metadata for each condition:
							// Reason/Message are optional pointers in the API model,
							// so check non-nil before dereferencing.
							for _, condition := range adapter.Conditions {
								g.Expect(condition.Reason).NotTo(BeNil(),
									"adapter %s condition %s should have non-nil reason", adapter.Adapter, condition.Type)
								g.Expect(*condition.Reason).NotTo(BeEmpty(),
									"adapter %s condition %s should have non-empty reason", adapter.Adapter, condition.Type)
								g.Expect(condition.Message).NotTo(BeNil(),
									"adapter %s condition %s should have non-nil message", adapter.Adapter, condition.Type)
								g.Expect(*condition.Message).NotTo(BeEmpty(),
									"adapter %s condition %s should have non-empty message", adapter.Adapter, condition.Type)
								g.Expect(condition.LastTransitionTime).NotTo(BeZero(),
									"adapter %s condition %s should have valid last_transition_time", adapter.Adapter, condition.Type)
							}
						}
					}, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed())
					ginkgo.By("Verify final cluster state")
					// Wait for cluster Ready condition and verify both Ready and Available conditions are True.
					// This confirms the cluster has reached the desired end state.
					err = h.WaitForClusterCondition(
						ctx,
						clusterID,
						client.ConditionTypeReady,
						openapi.ResourceConditionStatusTrue,
						h.Cfg.Timeouts.Cluster.Ready,
					)
					Expect(err).NotTo(HaveOccurred(), "cluster Ready condition should transition to True")
					finalCluster, err := h.Client.GetCluster(ctx, clusterID)
					Expect(err).NotTo(HaveOccurred(), "failed to get final cluster state")
					Expect(finalCluster.Status).NotTo(BeNil(), "cluster status should be present")
					hasReady := h.HasResourceCondition(finalCluster.Status.Conditions,
						client.ConditionTypeReady, openapi.ResourceConditionStatusTrue)
					Expect(hasReady).To(BeTrue(), "cluster should have Ready=True condition")
					hasAvailable := h.HasResourceCondition(finalCluster.Status.Conditions,
						client.ConditionTypeAvailable, openapi.ResourceConditionStatusTrue)
					Expect(hasAvailable).To(BeTrue(), "cluster should have Available=True condition")
					// Validate observedGeneration for Ready and Available conditions
					for _, condition := range finalCluster.Status.Conditions {
						if condition.Type == client.ConditionTypeReady || condition.Type == client.ConditionTypeAvailable {
							Expect(condition.ObservedGeneration).To(Equal(int32(1)),
								"cluster condition %s should have observed_generation=1 for new creation request", condition.Type)
						}
					}
					// Validate adapter-specific conditions in cluster status.
					// Each required adapter should report its own condition type
					// (e.g., ClNamespaceSuccessful, ClJobSuccessful).
					// AdapterNameToConditionType presumably maps "cl-namespace" ->
					// "ClNamespaceSuccessful" per the example above — confirm in helper.
					for _, adapterName := range h.Cfg.Adapters.Cluster {
						expectedCondType := h.AdapterNameToConditionType(adapterName)
						hasAdapterCondition := h.HasResourceCondition(
							finalCluster.Status.Conditions,
							expectedCondType,
							openapi.ResourceConditionStatusTrue,
						)
						Expect(hasAdapterCondition).To(BeTrue(),
							"cluster should have %s=True condition for adapter %s",
							expectedCondType, adapterName)
					}
				})
		})
		ginkgo.Describe("K8s Resources Check Aligned with Preinstalled Clusters Related Adapters Specified", ginkgo.Label(labels.Tier0), func() {
			// This test validates Kubernetes resource creation for adapters that create K8s resources:
			// 1. Direct K8s resource verification for each adapter (namespace, job, deployment)
			// 2. Validation of resource metadata (labels, annotations) and status
			// 3. Final cluster state verification
			//
			// Note: Not all adapters create K8s resources (e.g., cl-maestro interacts with Maestro service).
			// Adapters without K8s resources are verified via adapter status in "Basic Workflow Validation" test.
			ginkgo.It("should create Kubernetes resources with correct templated values for adapters that create K8s resources",
				func(ctx context.Context) {
					ginkgo.By("Verify Kubernetes resources for each required adapter")
					// Map from adapter name to K8s resource verification function.
					// Labels are used to FIND resources (via kubectl label selector).
					// Annotations are explicitly verified since they're not used in selectors.
					adapterResourceVerifiers := map[string]func() error{
						"cl-namespace": func() error {
							expectedLabels := map[string]string{
								"hyperfleet.io/cluster-id":     clusterID,
								"hyperfleet.io/cluster-name":   clusterName,
								"e2e.hyperfleet.io/managed-by": "test-framework",
							}
							expectedAnnotations := map[string]string{
								// Matches the freshly created cluster's generation (1).
								"hyperfleet.io/generation": "1",
							}
							return h.VerifyNamespaceActive(ctx, clusterID, expectedLabels, expectedAnnotations)
						},
						"cl-job": func() error {
							expectedLabels := map[string]string{
								"hyperfleet.io/cluster-id":    clusterID,
								"hyperfleet.io/resource-type": "job",
							}
							expectedAnnotations := map[string]string{
								"hyperfleet.io/generation": "1",
							}
							return h.VerifyJobComplete(ctx, clusterID, expectedLabels, expectedAnnotations)
						},
						"cl-deployment": func() error {
							expectedLabels := map[string]string{
								"hyperfleet.io/cluster-id":    clusterID,
								"hyperfleet.io/resource-type": "deployment",
							}
							expectedAnnotations := map[string]string{
								"hyperfleet.io/generation": "1",
							}
							return h.VerifyDeploymentAvailable(ctx, clusterID, expectedLabels, expectedAnnotations)
						},
					}
					// Verify K8s resources only for adapters that have verifiers defined.
					// This explicitly tests only adapters that create K8s resources.
					// Map iteration order is random, so adapters are verified in
					// nondeterministic order; each verifier must be order-independent.
					for adapterName, verifier := range adapterResourceVerifiers {
						ginkgo.By("Verifying Kubernetes resource for adapter: " + adapterName)
						Eventually(func() error {
							return verifier()
						}, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed(),
							"Kubernetes resource for adapter %s should be created and reach desired state", adapterName)
						ginkgo.GinkgoWriter.Printf("Successfully verified K8s resource for adapter: %s\n", adapterName)
					}
					ginkgo.By("Verify final cluster state to ensure Ready before cleanup")
					// Wait for cluster Ready condition to prevent namespace deletion conflicts.
					// Without this, adapters may still be creating resources during cleanup.
					err := h.WaitForClusterCondition(
						ctx,
						clusterID,
						client.ConditionTypeReady,
						openapi.ResourceConditionStatusTrue,
						h.Cfg.Timeouts.Cluster.Ready,
					)
					Expect(err).NotTo(HaveOccurred(), "cluster Ready condition should transition to True before cleanup")
				})
		})
		ginkgo.Describe("Adapter Dependency Relationships Workflow Validation", ginkgo.Label(labels.Tier0), func() {
			// This test validates adapter dependency relationships:
			// 1. During cl-job execution: cl-deployment Applied=False and Available=Unknown (never False)
			// 2. After cl-job completes: cl-deployment can proceed (no validation on Available during execution)
			// 3. Eventually: cl-deployment Available becomes True (success)
			ginkgo.It("should validate cl-deployment dependency on cl-job with comprehensive condition checks",
				func(ctx context.Context) {
					// Tighter interval than the global config so the transient
					// waiting state is not missed between polls. Gomega accepts
					// duration strings like "1s" for Eventually's interval.
					pollingInterval := "1s"
					ginkgo.By("Verify cl-deployment initial state and dependency waiting behavior")
					// Capture cl-deployment's initial waiting state.
					// Poll until cl-deployment appears in the statuses.
					// NOTE(review): this assumes cl-deployment is still waiting on
					// cl-job when it first appears; if cl-job completes very fast
					// these assertions could observe a later state — confirm timing.
					var foundInitialState bool
					Eventually(func(g Gomega) {
						// Reset each attempt: Eventually re-runs the whole closure.
						foundInitialState = false
						statuses, err := h.Client.GetClusterStatuses(ctx, clusterID)
						g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses")
						// Find cl-deployment adapter
						for _, adapter := range statuses.Items {
							if adapter.Adapter == "cl-deployment" {
								foundInitialState = true
								// Verify initial waiting state
								hasAppliedFalse := h.HasAdapterCondition(
									adapter.Conditions,
									client.ConditionTypeApplied,
									openapi.AdapterConditionStatusFalse,
								)
								g.Expect(hasAppliedFalse).To(BeTrue(),
									"cl-deployment Applied condition should be False initially (waiting for cl-job)")
								hasAvailableUnknown := h.HasAdapterCondition(
									adapter.Conditions,
									client.ConditionTypeAvailable,
									openapi.AdapterConditionStatusUnknown,
								)
								g.Expect(hasAvailableUnknown).To(BeTrue(),
									"cl-deployment Available condition should be Unknown initially (waiting for cl-job)")
								hasHealthTrue := h.HasAdapterCondition(
									adapter.Conditions,
									client.ConditionTypeHealth,
									openapi.AdapterConditionStatusTrue,
								)
								g.Expect(hasHealthTrue).To(BeTrue(),
									"cl-deployment Health condition should be True (adapter is healthy, just waiting)")
								// Adapter found and validated; skip the not-found check below.
								return
							}
						}
						g.Expect(foundInitialState).To(BeTrue(), "cl-deployment adapter should appear in statuses")
					}, h.Cfg.Timeouts.Adapter.Processing, pollingInterval).Should(Succeed())
					ginkgo.By("Verify dependency: cl-deployment Applied=False and Available=Unknown during cl-job execution")
					// Poll continuously until cl-deployment Available becomes True:
					// - Before cl-job Available=True: verify cl-deployment Applied=False and Available!=False
					// - After cl-job Available=True: only wait for cl-deployment Available=True
					// - Exit when cl-deployment Available=True
					// A manual select loop is used instead of Eventually because the
					// check is stateful across polls (jobAvailableReachedTrue latches).
					timeout := time.After(h.Cfg.Timeouts.Adapter.Processing)
					ticker := time.NewTicker(1 * time.Second)
					defer ticker.Stop()
					// Latches once cl-job reports Available=True; dependency
					// assertions only apply before that point.
					var jobAvailableReachedTrue bool
				pollLoop:
					for {
						select {
						case <-timeout:
							ginkgo.Fail("Timed out waiting for cl-deployment Available condition to become True")
						case <-ticker.C:
							statuses, err := h.Client.GetClusterStatuses(ctx, clusterID)
							Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses")
							// Snapshot of the conditions relevant to this poll.
							var jobAvailableTrue bool
							var deploymentAppliedTrue bool
							var deploymentAvailableTrue bool
							var deploymentAvailableFalse bool
							for _, adapter := range statuses.Items {
								if adapter.Adapter == "cl-job" {
									jobAvailableTrue = h.HasAdapterCondition(
										adapter.Conditions,
										client.ConditionTypeAvailable,
										openapi.AdapterConditionStatusTrue,
									)
								}
								if adapter.Adapter == "cl-deployment" {
									deploymentAppliedTrue = h.HasAdapterCondition(
										adapter.Conditions,
										client.ConditionTypeApplied,
										openapi.AdapterConditionStatusTrue,
									)
									deploymentAvailableTrue = h.HasAdapterCondition(
										adapter.Conditions,
										client.ConditionTypeAvailable,
										openapi.AdapterConditionStatusTrue,
									)
									deploymentAvailableFalse = h.HasAdapterCondition(
										adapter.Conditions,
										client.ConditionTypeAvailable,
										openapi.AdapterConditionStatusFalse,
									)
								}
							}
							// Track when cl-job Available first becomes True
							if jobAvailableTrue && !jobAvailableReachedTrue {
								jobAvailableReachedTrue = true
								ginkgo.GinkgoWriter.Printf("cl-job Available=True reached, cl-deployment can now proceed\n")
							}
							// Validate dependency enforcement: only check while cl-job is still executing
							if !jobAvailableReachedTrue {
								// cl-deployment should not start applying resources until cl-job completes
								Expect(deploymentAppliedTrue).To(BeFalse(),
									"cl-deployment Applied should remain False while cl-job Available is not True yet")
								// cl-deployment Available should stay Unknown (not False) while waiting for cl-job
								Expect(deploymentAvailableFalse).To(BeFalse(),
									"cl-deployment Available must be Unknown (not False) during cl-job execution")
							}
							// Exit when cl-deployment Available becomes True (workflow complete)
							if deploymentAvailableTrue {
								ginkgo.GinkgoWriter.Printf("cl-deployment Available=True reached, dependency validation successful\n")
								break pollLoop
							}
						}
					}
					ginkgo.GinkgoWriter.Printf("Successfully validated cl-deployment dependency on cl-job with correct condition transitions\n")
				})
		})
		ginkgo.AfterEach(func(ctx context.Context) {
			// Skip cleanup if helper not initialized or no cluster created
			// (e.g., BeforeEach failed before assigning clusterID).
			if h == nil || clusterID == "" {
				return
			}
			ginkgo.By("cleaning up cluster " + clusterID)
			err := h.CleanupTestCluster(ctx, clusterID)
			Expect(err).NotTo(HaveOccurred(), "failed to cleanup cluster %s", clusterID)
		})
	},
)