diff --git a/cmd/wfctl/deploy_state.go b/cmd/wfctl/deploy_state.go new file mode 100644 index 00000000..0fc544d4 --- /dev/null +++ b/cmd/wfctl/deploy_state.go @@ -0,0 +1,193 @@ +package main + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "os" + "time" + + "github.com/GoCodeAlone/workflow/config" +) + +const deployStateSchemaVersion = 1 + +// DeployedModuleState records what was deployed for a single module. +type DeployedModuleState struct { + // Type is the module type string (e.g. "storage.sqlite"). + Type string `json:"type"` + // Stateful indicates whether this module manages persistent state. + Stateful bool `json:"stateful"` + // ResourceID is the infrastructure resource identifier generated for this + // module (e.g. "database/prod-orders-db"). + ResourceID string `json:"resourceId,omitempty"` + // Config is a snapshot of the module's config at deploy time. + Config map[string]any `json:"config,omitempty"` +} + +// DeployedPipelineState records what was deployed for a single pipeline. +type DeployedPipelineState struct { + // Trigger is the pipeline trigger type (e.g. "http"). + Trigger string `json:"trigger"` + // Path is the HTTP path if the trigger is HTTP-based. + Path string `json:"path,omitempty"` + // Method is the HTTP method if the trigger is HTTP-based. + Method string `json:"method,omitempty"` +} + +// DeployedResources is the top-level resource map inside a DeploymentState. +type DeployedResources struct { + Modules map[string]DeployedModuleState `json:"modules,omitempty"` + Pipelines map[string]DeployedPipelineState `json:"pipelines,omitempty"` +} + +// DeploymentState is the full state manifest written after a successful deploy. +// It is serialised to deployment.state.json alongside the workflow config. +type DeploymentState struct { + // Version is the manifest format version (currently "1"). + Version string `json:"version"` + // ConfigHash is a SHA-256 hex digest of the config file contents at deploy time. 
+ ConfigHash string `json:"configHash"` + // DeployedAt is the RFC 3339 timestamp of the deployment. + DeployedAt time.Time `json:"deployedAt"` + // ConfigFile is the path to the workflow config file that was deployed. + ConfigFile string `json:"configFile"` + // Resources contains per-module and per-pipeline state records. + Resources DeployedResources `json:"resources"` + // SchemaVersion is an integer version for the state file format. + SchemaVersion int `json:"schemaVersion"` + // Migrations lists migration IDs that have been applied. + Migrations []string `json:"migrations,omitempty"` +} + +// SaveState writes the DeploymentState to a JSON file at path. +func SaveState(state *DeploymentState, path string) error { + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return fmt.Errorf("marshal deployment state: %w", err) + } + if err := os.WriteFile(path, data, 0640); err != nil { //nolint:gosec // G306: deploy state file + return fmt.Errorf("write deployment state: %w", err) + } + return nil +} + +// LoadState reads and deserialises a DeploymentState from a JSON file at path. +// Returns an error if the file does not exist or cannot be parsed. +func LoadState(path string) (*DeploymentState, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read deployment state: %w", err) + } + var state DeploymentState + if err := json.Unmarshal(data, &state); err != nil { + return nil, fmt.Errorf("parse deployment state: %w", err) + } + return &state, nil +} + +// BuildStateFromConfig constructs a DeploymentState from a loaded WorkflowConfig. +// configFile is the original config file path (used for display and hashing). +// namespace is used when generating resource IDs (may be empty). +// migrations is the optional list of already-applied migration IDs. 
+func BuildStateFromConfig(cfg *config.WorkflowConfig, configFile, namespace string, migrations []string) (*DeploymentState, error) { + // Hash the config file if it exists. + configHash := "" + if configFile != "" { + h, err := hashFile(configFile) + if err == nil { + configHash = "sha256:" + h + } + } + + state := &DeploymentState{ + Version: "1", + ConfigHash: configHash, + DeployedAt: time.Now().UTC(), + ConfigFile: configFile, + SchemaVersion: deployStateSchemaVersion, + Migrations: migrations, + Resources: DeployedResources{ + Modules: make(map[string]DeployedModuleState), + Pipelines: make(map[string]DeployedPipelineState), + }, + } + + // Populate modules. + for _, mod := range cfg.Modules { + stateful := IsStateful(mod.Type) + resourceID := "" + if stateful { + resourceID = GenerateResourceID(mod.Name, mod.Type, namespace) + } + + // Deep-copy the config map so mutations to the original don't bleed in. + var cfgCopy map[string]any + if len(mod.Config) > 0 { + cfgCopy = make(map[string]any, len(mod.Config)) + for k, v := range mod.Config { + cfgCopy[k] = v + } + } + + state.Resources.Modules[mod.Name] = DeployedModuleState{ + Type: mod.Type, + Stateful: stateful, + ResourceID: resourceID, + Config: cfgCopy, + } + } + + // Populate pipelines. + for name, raw := range cfg.Pipelines { + pipelineMap, ok := raw.(map[string]any) + if !ok { + continue + } + ps := buildPipelineState(pipelineMap) + state.Resources.Pipelines[name] = ps + } + + return state, nil +} + +// buildPipelineState extracts trigger metadata from a raw pipeline map. 
+func buildPipelineState(pipelineMap map[string]any) DeployedPipelineState { + ps := DeployedPipelineState{} + + triggerRaw, ok := pipelineMap["trigger"] + if !ok { + return ps + } + + triggerMap, ok := triggerRaw.(map[string]any) + if !ok { + return ps + } + + ps.Trigger, _ = triggerMap["type"].(string) + + cfgRaw, ok := triggerMap["config"] + if !ok { + return ps + } + triggerCfg, ok := cfgRaw.(map[string]any) + if !ok { + return ps + } + + ps.Path, _ = triggerCfg["path"].(string) + ps.Method, _ = triggerCfg["method"].(string) + + return ps +} + +// hashFile computes a hex-encoded SHA-256 digest of the file at path. +func hashFile(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + sum := sha256.Sum256(data) + return fmt.Sprintf("%x", sum), nil +} diff --git a/cmd/wfctl/deploy_state_test.go b/cmd/wfctl/deploy_state_test.go new file mode 100644 index 00000000..ed6af68d --- /dev/null +++ b/cmd/wfctl/deploy_state_test.go @@ -0,0 +1,293 @@ +package main + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/GoCodeAlone/workflow/config" +) + +// --- SaveState / LoadState round-trip --- + +func TestSaveAndLoadState(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "deployment.state.json") + + original := &DeploymentState{ + Version: "1", + ConfigHash: "sha256:abc123", + DeployedAt: time.Now().UTC().Truncate(time.Second), + ConfigFile: "app.yaml", + SchemaVersion: 1, + Migrations: []string{"001_initial"}, + Resources: DeployedResources{ + Modules: map[string]DeployedModuleState{ + "orders-db": { + Type: "storage.sqlite", + Stateful: true, + ResourceID: "database/orders-db", + Config: map[string]any{"dbPath": "/data/orders.db"}, + }, + }, + Pipelines: map[string]DeployedPipelineState{ + "create-order": {Trigger: "http", Path: "/api/v1/orders", Method: "POST"}, + }, + }, + } + + if err := SaveState(original, path); err != nil { + t.Fatalf("SaveState failed: %v", err) 
+ } + + loaded, err := LoadState(path) + if err != nil { + t.Fatalf("LoadState failed: %v", err) + } + + if loaded.Version != original.Version { + t.Errorf("Version: got %q, want %q", loaded.Version, original.Version) + } + if loaded.ConfigHash != original.ConfigHash { + t.Errorf("ConfigHash: got %q, want %q", loaded.ConfigHash, original.ConfigHash) + } + if loaded.SchemaVersion != original.SchemaVersion { + t.Errorf("SchemaVersion: got %d, want %d", loaded.SchemaVersion, original.SchemaVersion) + } + if len(loaded.Migrations) != 1 || loaded.Migrations[0] != "001_initial" { + t.Errorf("Migrations: got %v, want [001_initial]", loaded.Migrations) + } + + mod, ok := loaded.Resources.Modules["orders-db"] + if !ok { + t.Fatal("expected orders-db module in loaded state") + } + if !mod.Stateful { + t.Error("expected orders-db to be stateful") + } + if mod.ResourceID != "database/orders-db" { + t.Errorf("ResourceID: got %q, want %q", mod.ResourceID, "database/orders-db") + } + + pl, ok := loaded.Resources.Pipelines["create-order"] + if !ok { + t.Fatal("expected create-order pipeline in loaded state") + } + if pl.Method != "POST" { + t.Errorf("Pipeline method: got %q, want POST", pl.Method) + } +} + +func TestLoadStateMissingFile(t *testing.T) { + _, err := LoadState("/nonexistent/path/deployment.state.json") + if err == nil { + t.Fatal("expected error for missing state file") + } +} + +func TestLoadStateInvalidJSON(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "bad.json") + if err := os.WriteFile(path, []byte("not json {{{"), 0644); err != nil { + t.Fatal(err) + } + _, err := LoadState(path) + if err == nil { + t.Fatal("expected error for invalid JSON") + } +} + +// --- BuildStateFromConfig --- + +func TestBuildStateFromConfigModules(t *testing.T) { + cfg := &config.WorkflowConfig{ + Modules: []config.ModuleConfig{ + {Name: "orders-db", Type: "storage.sqlite", Config: map[string]any{"dbPath": "/data/orders.db"}}, + {Name: "event-broker", Type: 
"messaging.broker"}, + {Name: "http-server", Type: "http.server", Config: map[string]any{"address": ":8080"}}, + {Name: "cache", Type: "cache.redis"}, + }, + Pipelines: map[string]any{}, + } + + state, err := BuildStateFromConfig(cfg, "", "prod", nil) + if err != nil { + t.Fatalf("BuildStateFromConfig failed: %v", err) + } + + if state.Version != "1" { + t.Errorf("Version: got %q, want 1", state.Version) + } + if state.SchemaVersion != 1 { + t.Errorf("SchemaVersion: got %d, want 1", state.SchemaVersion) + } + + ordersDB := state.Resources.Modules["orders-db"] + if !ordersDB.Stateful { + t.Error("orders-db should be stateful") + } + if ordersDB.ResourceID != "database/prod-orders-db" { + t.Errorf("orders-db ResourceID: got %q, want %q", ordersDB.ResourceID, "database/prod-orders-db") + } + + broker := state.Resources.Modules["event-broker"] + if !broker.Stateful { + t.Error("event-broker should be stateful") + } + + server := state.Resources.Modules["http-server"] + if server.Stateful { + t.Error("http-server should NOT be stateful") + } + if server.ResourceID != "" { + t.Errorf("http-server should have no ResourceID, got %q", server.ResourceID) + } + + cache := state.Resources.Modules["cache"] + if cache.Stateful { + t.Error("cache.redis should NOT be stateful (ephemeral by default)") + } +} + +func TestBuildStateFromConfigPipelines(t *testing.T) { + cfg := &config.WorkflowConfig{ + Modules: []config.ModuleConfig{}, + Pipelines: map[string]any{ + "create-order": map[string]any{ + "trigger": map[string]any{ + "type": "http", + "config": map[string]any{ + "path": "/api/v1/orders", + "method": "POST", + }, + }, + "steps": []any{}, + }, + "list-orders": map[string]any{ + "trigger": map[string]any{ + "type": "http", + "config": map[string]any{ + "path": "/api/v1/orders", + "method": "GET", + }, + }, + "steps": []any{}, + }, + }, + } + + state, err := BuildStateFromConfig(cfg, "", "", nil) + if err != nil { + t.Fatalf("BuildStateFromConfig failed: %v", err) + } + + if 
len(state.Resources.Pipelines) != 2 { + t.Fatalf("expected 2 pipelines, got %d", len(state.Resources.Pipelines)) + } + + createOrder := state.Resources.Pipelines["create-order"] + if createOrder.Trigger != "http" { + t.Errorf("create-order Trigger: got %q, want http", createOrder.Trigger) + } + if createOrder.Path != "/api/v1/orders" { + t.Errorf("create-order Path: got %q, want /api/v1/orders", createOrder.Path) + } + if createOrder.Method != "POST" { + t.Errorf("create-order Method: got %q, want POST", createOrder.Method) + } +} + +func TestBuildStateFromConfigWithFile(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "app.yaml") + if err := os.WriteFile(cfgPath, []byte("modules: []\n"), 0644); err != nil { + t.Fatal(err) + } + + cfg := &config.WorkflowConfig{ + Modules: []config.ModuleConfig{}, + Pipelines: map[string]any{}, + } + + state, err := BuildStateFromConfig(cfg, cfgPath, "", nil) + if err != nil { + t.Fatalf("BuildStateFromConfig failed: %v", err) + } + + if state.ConfigFile != cfgPath { + t.Errorf("ConfigFile: got %q, want %q", state.ConfigFile, cfgPath) + } + if state.ConfigHash == "" { + t.Error("expected non-empty ConfigHash when config file exists") + } + if len(state.ConfigHash) < 8 || state.ConfigHash[:7] != "sha256:" { + t.Errorf("ConfigHash should start with sha256:, got %q", state.ConfigHash) + } +} + +func TestBuildStateFromConfigMigrations(t *testing.T) { + cfg := &config.WorkflowConfig{ + Modules: []config.ModuleConfig{}, + Pipelines: map[string]any{}, + } + migrations := []string{"001_initial", "002_add_index"} + state, err := BuildStateFromConfig(cfg, "", "", migrations) + if err != nil { + t.Fatalf("BuildStateFromConfig failed: %v", err) + } + if len(state.Migrations) != 2 { + t.Fatalf("expected 2 migrations, got %d", len(state.Migrations)) + } +} + +func TestBuildStateConfigCopyIsolation(t *testing.T) { + originalConfig := map[string]any{"dbPath": "/data/orders.db"} + cfg := &config.WorkflowConfig{ + Modules: 
[]config.ModuleConfig{ + {Name: "db", Type: "storage.sqlite", Config: originalConfig}, + }, + Pipelines: map[string]any{}, + } + + state, err := BuildStateFromConfig(cfg, "", "", nil) + if err != nil { + t.Fatalf("BuildStateFromConfig failed: %v", err) + } + + // Mutate the original config — the state copy should be unaffected. + originalConfig["dbPath"] = "/data/mutated.db" + + snapshotPath, _ := state.Resources.Modules["db"].Config["dbPath"].(string) + if snapshotPath != "/data/orders.db" { + t.Errorf("state config was mutated: got %q, want /data/orders.db", snapshotPath) + } +} + +func TestSaveStateIsValidJSON(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "deployment.state.json") + + state := &DeploymentState{ + Version: "1", + SchemaVersion: 1, + Resources: DeployedResources{ + Modules: make(map[string]DeployedModuleState), + Pipelines: make(map[string]DeployedPipelineState), + }, + } + + if err := SaveState(state, path); err != nil { + t.Fatalf("SaveState failed: %v", err) + } + + data, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + var v map[string]any + if err := json.Unmarshal(data, &v); err != nil { + t.Fatalf("saved state is not valid JSON: %v", err) + } +} diff --git a/cmd/wfctl/diff.go b/cmd/wfctl/diff.go new file mode 100644 index 00000000..d0cf4cf2 --- /dev/null +++ b/cmd/wfctl/diff.go @@ -0,0 +1,465 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "sort" + "strings" + + "github.com/GoCodeAlone/workflow/config" +) + +func runDiff(args []string) error { + fs := flag.NewFlagSet("diff", flag.ContinueOnError) + stateFile := fs.String("state", "", "Path to deployment state file for resource correlation") + format := fs.String("format", "text", "Output format: text or json") + checkBreaking := fs.Bool("check-breaking", false, "Warn about breaking changes (removed stateful modules, changed types)") + fs.Usage = func() { + fmt.Fprintf(fs.Output(), `Usage: wfctl diff [options] + +Compare two workflow 
configuration files and show what changed.

Options:
`)
		fs.PrintDefaults()
	}
	if err := fs.Parse(args); err != nil {
		return err
	}

	if fs.NArg() < 2 {
		fs.Usage()
		// Name the expected positional arguments so the error is actionable
		// (the bare trailing colon previously left the user guessing).
		return fmt.Errorf("two config files are required: <old-config> <new-config>")
	}

	oldPath := fs.Arg(0)
	newPath := fs.Arg(1)

	oldCfg, err := config.LoadFromFile(oldPath)
	if err != nil {
		return fmt.Errorf("load old config %q: %w", oldPath, err)
	}
	newCfg, err := config.LoadFromFile(newPath)
	if err != nil {
		return fmt.Errorf("load new config %q: %w", newPath, err)
	}

	// Optionally load the deployment state for resource correlation.
	var state *DeploymentState
	if *stateFile != "" {
		state, err = LoadState(*stateFile)
		if err != nil {
			return fmt.Errorf("load state file %q: %w", *stateFile, err)
		}
	}

	result := diffConfigs(oldCfg, newCfg, state)

	switch *format {
	case "json":
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "  ")
		if err := enc.Encode(result); err != nil {
			return fmt.Errorf("encode diff result: %w", err)
		}
	default:
		printDiffText(result)
	}

	if *checkBreaking && len(result.BreakingChanges) > 0 {
		return fmt.Errorf("%d breaking change(s) detected", len(result.BreakingChanges))
	}
	return nil
}

// --- Data types for diff output ---

// DiffStatus describes what happened to a named element between two configs.
type DiffStatus string

const (
	DiffStatusAdded     DiffStatus = "added"
	DiffStatusRemoved   DiffStatus = "removed"
	DiffStatusChanged   DiffStatus = "changed"
	DiffStatusUnchanged DiffStatus = "unchanged"
)

// ModuleDiff captures the diff for a single module.
type ModuleDiff struct {
	Name     string     `json:"name"`
	Status   DiffStatus `json:"status"`
	Type     string     `json:"type,omitempty"`
	Stateful bool       `json:"stateful"`
	// Detail holds a human-readable description of what changed.
	Detail string `json:"detail,omitempty"`
	// ResourceID is the correlated infrastructure resource ID from state, if known.
+ ResourceID string `json:"resourceId,omitempty"` + // BreakingChanges lists data-loss risks for this module. + BreakingChanges []BreakingChange `json:"breakingChanges,omitempty"` +} + +// PipelineDiff captures the diff for a single pipeline. +type PipelineDiff struct { + Name string `json:"name"` + Status DiffStatus `json:"status"` + // Trigger describes the pipeline trigger (e.g. "http POST /api/v1/orders"). + Trigger string `json:"trigger,omitempty"` + // Detail holds a human-readable description of what changed. + Detail string `json:"detail,omitempty"` +} + +// BreakingChangeSummary aggregates breaking-change warnings across the diff. +type BreakingChangeSummary struct { + ModuleName string `json:"moduleName"` + Changes []BreakingChange `json:"changes"` +} + +// DiffResult is the full structured output of diffConfigs. +type DiffResult struct { + OldConfig string `json:"oldConfig"` + NewConfig string `json:"newConfig"` + Modules []ModuleDiff `json:"modules"` + Pipelines []PipelineDiff `json:"pipelines"` + BreakingChanges []BreakingChangeSummary `json:"breakingChanges,omitempty"` +} + +// --- Core diffing logic --- + +// diffConfigs produces a DiffResult comparing two workflow configs. +// state is optional; when provided, resource IDs are correlated from it. +func diffConfigs(oldCfg, newCfg *config.WorkflowConfig, state *DeploymentState) DiffResult { + result := DiffResult{} + + // Index old modules by name. + oldModules := make(map[string]*config.ModuleConfig, len(oldCfg.Modules)) + for i := range oldCfg.Modules { + m := &oldCfg.Modules[i] + oldModules[m.Name] = m + } + newModules := make(map[string]*config.ModuleConfig, len(newCfg.Modules)) + for i := range newCfg.Modules { + m := &newCfg.Modules[i] + newModules[m.Name] = m + } + + // Collect all module names, sorted for deterministic output. 
+ allModuleNames := unionKeys(oldModules, newModules) + sort.Strings(allModuleNames) + + for _, name := range allModuleNames { + oldMod := oldModules[name] + newMod := newModules[name] + diff := diffModule(name, oldMod, newMod, state) + result.Modules = append(result.Modules, diff) + + if len(diff.BreakingChanges) > 0 { + result.BreakingChanges = append(result.BreakingChanges, BreakingChangeSummary{ + ModuleName: name, + Changes: diff.BreakingChanges, + }) + } + } + + // Pipelines. + oldPipelines := normalisePipelines(oldCfg.Pipelines) + newPipelines := normalisePipelines(newCfg.Pipelines) + + allPipelineNames := unionStringKeys(oldPipelines, newPipelines) + sort.Strings(allPipelineNames) + + for _, name := range allPipelineNames { + oldP, hasOld := oldPipelines[name] + newP, hasNew := newPipelines[name] + result.Pipelines = append(result.Pipelines, diffPipeline(name, oldP, hasOld, newP, hasNew)) + } + + return result +} + +// diffModule computes the diff for a single module entry. +func diffModule(name string, oldMod, newMod *config.ModuleConfig, state *DeploymentState) ModuleDiff { + d := ModuleDiff{Name: name} + + // Correlate resource ID from state if available. + if state != nil { + if ms, ok := state.Resources.Modules[name]; ok { + d.ResourceID = ms.ResourceID + } + } + + switch { + case oldMod == nil && newMod != nil: + // Added. + d.Status = DiffStatusAdded + d.Type = newMod.Type + d.Stateful = IsStateful(newMod.Type) + d.Detail = "NEW" + + case oldMod != nil && newMod == nil: + // Removed. + d.Status = DiffStatusRemoved + d.Type = oldMod.Type + d.Stateful = IsStateful(oldMod.Type) + if d.Stateful { + d.Detail = "REMOVED — WARNING: stateful resource may still hold data" + } else { + d.Detail = "REMOVED (stateless, safe to remove)" + } + + default: + // Both present — check for changes. 
+ d.Type = newMod.Type + d.Stateful = IsStateful(newMod.Type) + + breaking := DetectBreakingChanges(oldMod, newMod) + configChanged := isConfigChanged(oldMod.Config, newMod.Config) + typeChanged := oldMod.Type != newMod.Type + + switch { + case typeChanged: + d.Status = DiffStatusChanged + d.Detail = fmt.Sprintf("TYPE CHANGED: %s → %s", oldMod.Type, newMod.Type) + d.BreakingChanges = breaking + case len(breaking) > 0: + d.Status = DiffStatusChanged + parts := make([]string, 0, len(breaking)) + for _, bc := range breaking { + parts = append(parts, fmt.Sprintf("%s: %s → %s", bc.Field, describeValue(bc.OldValue), describeValue(bc.NewValue))) + } + d.Detail = "CONFIG CHANGED: " + strings.Join(parts, "; ") + d.BreakingChanges = breaking + case configChanged: + d.Status = DiffStatusChanged + d.Detail = "CONFIG CHANGED" + default: + d.Status = DiffStatusUnchanged + d.Detail = "UNCHANGED" + } + } + + return d +} + +// diffPipeline computes the diff for a single pipeline. +func diffPipeline(name string, oldP map[string]any, hasOld bool, newP map[string]any, hasNew bool) PipelineDiff { + d := PipelineDiff{Name: name} + + switch { + case !hasOld && hasNew: + d.Status = DiffStatusAdded + d.Trigger = describePipelineTrigger(newP) + d.Detail = "NEW" + + case hasOld && !hasNew: + d.Status = DiffStatusRemoved + d.Trigger = describePipelineTrigger(oldP) + d.Detail = "REMOVED" + + default: + d.Trigger = describePipelineTrigger(newP) + + oldSteps := countSteps(oldP) + newSteps := countSteps(newP) + + oldTrigger := describePipelineTrigger(oldP) + newTrigger := describePipelineTrigger(newP) + triggerChanged := oldTrigger != newTrigger + stepsChanged := oldSteps != newSteps + + switch { + case triggerChanged: + d.Status = DiffStatusChanged + d.Detail = fmt.Sprintf("TRIGGER CHANGED: %s → %s", oldTrigger, newTrigger) + case stepsChanged: + d.Status = DiffStatusChanged + d.Detail = fmt.Sprintf("STEPS CHANGED: %d → %d steps", oldSteps, newSteps) + default: + d.Status = DiffStatusUnchanged + 
d.Detail = "UNCHANGED" + } + } + + return d +} + +// --- Rendering --- + +// statusSymbol returns the one-character prefix for a diff status. +func statusSymbol(s DiffStatus) string { + switch s { + case DiffStatusAdded: + return "+" + case DiffStatusRemoved: + return "-" + case DiffStatusChanged: + return "~" + default: + return "=" + } +} + +// printDiffText writes a human-readable diff report to stdout. +func printDiffText(result DiffResult) { + fmt.Println("Modules:") + for _, m := range result.Modules { + statefulTag := "" + if m.Stateful && m.Status != DiffStatusUnchanged { + statefulTag = " [STATEFUL]" + } + fmt.Printf(" %s %-28s (%-30s) [%s]%s\n", + statusSymbol(m.Status), + m.Name, + moduleTypeLabel(m.Type), + m.Detail, + statefulTag, + ) + if m.ResourceID != "" && m.Status != DiffStatusAdded { + fmt.Printf(" resource: %s\n", m.ResourceID) + } + } + + if len(result.Pipelines) > 0 { + fmt.Println("\nPipelines:") + for _, p := range result.Pipelines { + fmt.Printf(" %s %-28s %-36s [%s]\n", + statusSymbol(p.Status), + p.Name, + fmt.Sprintf("(%s)", p.Trigger), + p.Detail, + ) + } + } + + if len(result.BreakingChanges) > 0 { + fmt.Println("\n[BREAKING CHANGES]") + for _, bc := range result.BreakingChanges { + fmt.Printf(" Module %q:\n", bc.ModuleName) + for _, ch := range bc.Changes { + fmt.Printf(" - %s\n", ch.Message) + if ch.Field != "" && ch.Field != "type" { + fmt.Printf(" This is a STATEFUL module. Data at the old location may be lost.\n") + fmt.Printf(" Recommendation: add a migration step or keep the old value.\n") + } + } + } + } +} + +// --- Helpers --- + +// normalisePipelines converts cfg.Pipelines (map[string]any) to a typed map. +func normalisePipelines(raw map[string]any) map[string]map[string]any { + out := make(map[string]map[string]any, len(raw)) + for name, v := range raw { + if m, ok := v.(map[string]any); ok { + out[name] = m + } else { + // Preserve the entry with an empty map so it still appears in diffs. 
+ out[name] = nil + } + } + return out +} + +// unionKeys returns the union of keys across two string-keyed maps. +func unionKeys[V any](a, b map[string]*V) []string { + seen := make(map[string]struct{}, len(a)+len(b)) + for k := range a { + seen[k] = struct{}{} + } + for k := range b { + seen[k] = struct{}{} + } + keys := make([]string, 0, len(seen)) + for k := range seen { + keys = append(keys, k) + } + return keys +} + +// unionStringKeys returns the union of keys across two map[string]map[string]any. +func unionStringKeys(a, b map[string]map[string]any) []string { + seen := make(map[string]struct{}, len(a)+len(b)) + for k := range a { + seen[k] = struct{}{} + } + for k := range b { + seen[k] = struct{}{} + } + keys := make([]string, 0, len(seen)) + for k := range seen { + keys = append(keys, k) + } + return keys +} + +// isConfigChanged returns true if the two config maps differ in any key or value. +func isConfigChanged(oldCfg, newCfg map[string]any) bool { + if len(oldCfg) != len(newCfg) { + return true + } + for k, ov := range oldCfg { + nv, ok := newCfg[k] + if !ok { + return true + } + if fmt.Sprintf("%v", ov) != fmt.Sprintf("%v", nv) { + return true + } + } + return false +} + +// describePipelineTrigger builds a short trigger description from a raw pipeline map. 
+func describePipelineTrigger(p map[string]any) string { + if p == nil { + return "unknown" + } + triggerRaw, ok := p["trigger"] + if !ok { + return "unknown" + } + triggerMap, ok := triggerRaw.(map[string]any) + if !ok { + return "unknown" + } + triggerType, _ := triggerMap["type"].(string) + + cfgRaw, ok := triggerMap["config"] + if !ok { + return triggerType + } + triggerCfg, ok := cfgRaw.(map[string]any) + if !ok { + return triggerType + } + + method, _ := triggerCfg["method"].(string) + path, _ := triggerCfg["path"].(string) + + if method != "" && path != "" { + return fmt.Sprintf("%s %s %s", triggerType, method, path) + } + if path != "" { + return fmt.Sprintf("%s %s", triggerType, path) + } + return triggerType +} + +// countSteps returns the number of steps in a raw pipeline map. +func countSteps(p map[string]any) int { + if p == nil { + return 0 + } + stepsRaw, ok := p["steps"] + if !ok { + return 0 + } + steps, ok := stepsRaw.([]any) + if !ok { + return 0 + } + return len(steps) +} diff --git a/cmd/wfctl/diff_test.go b/cmd/wfctl/diff_test.go new file mode 100644 index 00000000..dbc762f4 --- /dev/null +++ b/cmd/wfctl/diff_test.go @@ -0,0 +1,512 @@ +package main + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/GoCodeAlone/workflow/config" +) + +// --- diffConfigs tests --- + +func TestDiffConfigsModuleAdded(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: "http.server"}, + ) + newCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: "http.server"}, + config.ModuleConfig{Name: "new-cache", Type: "cache.redis"}, + ) + + result := diffConfigs(oldCfg, newCfg, nil) + + mod := findModuleDiff(t, result, "new-cache") + if mod.Status != DiffStatusAdded { + t.Errorf("new-cache status: got %q, want %q", mod.Status, DiffStatusAdded) + } +} + +func TestDiffConfigsModuleRemoved(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: 
"http.server"}, + config.ModuleConfig{Name: "old-module", Type: "http.middleware.cors"}, + ) + newCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: "http.server"}, + ) + + result := diffConfigs(oldCfg, newCfg, nil) + + mod := findModuleDiff(t, result, "old-module") + if mod.Status != DiffStatusRemoved { + t.Errorf("old-module status: got %q, want %q", mod.Status, DiffStatusRemoved) + } + if !strings.Contains(mod.Detail, "stateless") { + t.Errorf("expected 'stateless' in detail for cors module, got: %s", mod.Detail) + } +} + +func TestDiffConfigsStatefulModuleRemoved(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "orders-db", Type: "storage.sqlite"}, + ) + newCfg := cfgWithModules() + + result := diffConfigs(oldCfg, newCfg, nil) + + mod := findModuleDiff(t, result, "orders-db") + if mod.Status != DiffStatusRemoved { + t.Errorf("orders-db status: got %q, want %q", mod.Status, DiffStatusRemoved) + } + if !mod.Stateful { + t.Error("expected orders-db to be flagged as stateful") + } + if !strings.Contains(mod.Detail, "WARNING") { + t.Errorf("expected WARNING in detail for stateful removal, got: %s", mod.Detail) + } +} + +func TestDiffConfigsModuleUnchanged(t *testing.T) { + mod := config.ModuleConfig{Name: "server", Type: "http.server", Config: map[string]any{"address": ":8080"}} + oldCfg := cfgWithModules(mod) + newCfg := cfgWithModules(mod) + + result := diffConfigs(oldCfg, newCfg, nil) + + diff := findModuleDiff(t, result, "server") + if diff.Status != DiffStatusUnchanged { + t.Errorf("server status: got %q, want %q", diff.Status, DiffStatusUnchanged) + } +} + +func TestDiffConfigsModuleConfigChanged(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: "http.server", Config: map[string]any{"address": ":8080"}}, + ) + newCfg := cfgWithModules( + config.ModuleConfig{Name: "server", Type: "http.server", Config: map[string]any{"address": ":9090"}}, + ) + + result := diffConfigs(oldCfg, newCfg, 
nil) + + diff := findModuleDiff(t, result, "server") + if diff.Status != DiffStatusChanged { + t.Errorf("server status: got %q, want %q", diff.Status, DiffStatusChanged) + } +} + +func TestDiffConfigsStatefulModuleConfigChanged(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{ + Name: "orders-db", + Type: "storage.sqlite", + Config: map[string]any{"dbPath": "/data/old.db"}, + }, + ) + newCfg := cfgWithModules( + config.ModuleConfig{ + Name: "orders-db", + Type: "storage.sqlite", + Config: map[string]any{"dbPath": "/data/new.db"}, + }, + ) + + result := diffConfigs(oldCfg, newCfg, nil) + + diff := findModuleDiff(t, result, "orders-db") + if diff.Status != DiffStatusChanged { + t.Errorf("orders-db status: got %q, want %q", diff.Status, DiffStatusChanged) + } + if len(diff.BreakingChanges) == 0 { + t.Error("expected breaking changes for stateful module config change") + } + if len(result.BreakingChanges) == 0 { + t.Error("expected top-level breaking changes") + } +} + +func TestDiffConfigsModuleTypeChanged(t *testing.T) { + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "db", Type: "storage.sqlite"}, + ) + newCfg := cfgWithModules( + config.ModuleConfig{Name: "db", Type: "database.workflow"}, + ) + + result := diffConfigs(oldCfg, newCfg, nil) + + diff := findModuleDiff(t, result, "db") + if diff.Status != DiffStatusChanged { + t.Errorf("db status: got %q, want %q", diff.Status, DiffStatusChanged) + } + if !strings.Contains(diff.Detail, "TYPE CHANGED") { + t.Errorf("expected TYPE CHANGED in detail, got: %s", diff.Detail) + } +} + +// --- Pipeline diff tests --- + +func TestDiffConfigsPipelineAdded(t *testing.T) { + oldCfg := cfgWithPipelines() + newCfg := cfgWithPipelines( + "create-order", httpPipeline("POST", "/api/v1/orders", 3), + ) + + result := diffConfigs(oldCfg, newCfg, nil) + + pl := findPipelineDiff(t, result, "create-order") + if pl.Status != DiffStatusAdded { + t.Errorf("create-order status: got %q, want %q", pl.Status, 
DiffStatusAdded) + } +} + +func TestDiffConfigsPipelineRemoved(t *testing.T) { + oldCfg := cfgWithPipelines( + "legacy", httpPipeline("GET", "/api/v1/legacy", 1), + ) + newCfg := cfgWithPipelines() + + result := diffConfigs(oldCfg, newCfg, nil) + + pl := findPipelineDiff(t, result, "legacy") + if pl.Status != DiffStatusRemoved { + t.Errorf("legacy status: got %q, want %q", pl.Status, DiffStatusRemoved) + } +} + +func TestDiffConfigsPipelineUnchanged(t *testing.T) { + p := httpPipeline("GET", "/api/v1/orders", 2) + oldCfg := cfgWithPipelines("list-orders", p) + newCfg := cfgWithPipelines("list-orders", p) + + result := diffConfigs(oldCfg, newCfg, nil) + + pl := findPipelineDiff(t, result, "list-orders") + if pl.Status != DiffStatusUnchanged { + t.Errorf("list-orders status: got %q, want %q", pl.Status, DiffStatusUnchanged) + } +} + +func TestDiffConfigsPipelineStepsChanged(t *testing.T) { + oldCfg := cfgWithPipelines("create-order", httpPipeline("POST", "/api/v1/orders", 3)) + newCfg := cfgWithPipelines("create-order", httpPipeline("POST", "/api/v1/orders", 5)) + + result := diffConfigs(oldCfg, newCfg, nil) + + pl := findPipelineDiff(t, result, "create-order") + if pl.Status != DiffStatusChanged { + t.Errorf("create-order status: got %q, want %q", pl.Status, DiffStatusChanged) + } + if !strings.Contains(pl.Detail, "STEPS CHANGED") { + t.Errorf("expected STEPS CHANGED in detail, got: %s", pl.Detail) + } + if !strings.Contains(pl.Detail, "3 → 5") { + t.Errorf("expected step counts 3 → 5 in detail, got: %s", pl.Detail) + } +} + +func TestDiffConfigsPipelineTriggerChanged(t *testing.T) { + oldCfg := cfgWithPipelines("my-pipeline", httpPipeline("GET", "/old-path", 2)) + newCfg := cfgWithPipelines("my-pipeline", httpPipeline("GET", "/new-path", 2)) + + result := diffConfigs(oldCfg, newCfg, nil) + + pl := findPipelineDiff(t, result, "my-pipeline") + if pl.Status != DiffStatusChanged { + t.Errorf("my-pipeline status: got %q, want %q", pl.Status, DiffStatusChanged) + } + if 
!strings.Contains(pl.Detail, "TRIGGER CHANGED") { + t.Errorf("expected TRIGGER CHANGED in detail, got: %s", pl.Detail) + } +} + +// --- State correlation --- + +func TestDiffConfigsStateCorrelation(t *testing.T) { + state := &DeploymentState{ + Resources: DeployedResources{ + Modules: map[string]DeployedModuleState{ + "orders-db": { + Type: "storage.sqlite", + Stateful: true, + ResourceID: "database/prod-orders-db", + }, + }, + }, + } + + oldCfg := cfgWithModules( + config.ModuleConfig{Name: "orders-db", Type: "storage.sqlite"}, + ) + newCfg := cfgWithModules( + config.ModuleConfig{Name: "orders-db", Type: "storage.sqlite"}, + ) + + result := diffConfigs(oldCfg, newCfg, state) + + diff := findModuleDiff(t, result, "orders-db") + if diff.ResourceID != "database/prod-orders-db" { + t.Errorf("ResourceID: got %q, want database/prod-orders-db", diff.ResourceID) + } +} + +// --- runDiff integration --- + +func TestRunDiffMissingArgs(t *testing.T) { + err := runDiff([]string{}) + if err == nil { + t.Fatal("expected error when no args given") + } +} + +func TestRunDiffMissingSecondArg(t *testing.T) { + dir := t.TempDir() + p := filepath.Join(dir, "a.yaml") + writeTestConfigFile(t, p, simpleConfig()) + err := runDiff([]string{p}) + if err == nil { + t.Fatal("expected error when second config missing") + } +} + +func TestRunDiffText(t *testing.T) { + dir := t.TempDir() + oldPath := filepath.Join(dir, "old.yaml") + newPath := filepath.Join(dir, "new.yaml") + + writeTestConfigFile(t, oldPath, ` +modules: + - name: server + type: http.server + - name: orders-db + type: storage.sqlite + config: + dbPath: /data/orders.db +`) + writeTestConfigFile(t, newPath, ` +modules: + - name: server + type: http.server + - name: orders-db + type: storage.sqlite + config: + dbPath: /data/new.db + - name: new-cache + type: cache.redis +`) + + if err := runDiff([]string{oldPath, newPath}); err != nil { + t.Fatalf("runDiff failed: %v", err) + } +} + +func TestRunDiffJSON(t *testing.T) { + dir := 
t.TempDir()
+	oldPath := filepath.Join(dir, "old.yaml")
+	newPath := filepath.Join(dir, "new.yaml")
+
+	writeTestConfigFile(t, oldPath, simpleConfig())
+	writeTestConfigFile(t, newPath, simpleConfig())
+
+	// Capturing stdout here is awkward; just verify the command runs without error.
+	if err := runDiff([]string{"-format", "json", oldPath, newPath}); err != nil {
+		t.Fatalf("runDiff -format json failed: %v", err)
+	}
+}
+
+func TestRunDiffCheckBreaking(t *testing.T) {
+	dir := t.TempDir()
+	oldPath := filepath.Join(dir, "old.yaml")
+	newPath := filepath.Join(dir, "new.yaml")
+
+	writeTestConfigFile(t, oldPath, `
+modules:
+  - name: orders-db
+    type: storage.sqlite
+    config:
+      dbPath: /data/old.db
+`)
+	writeTestConfigFile(t, newPath, `
+modules:
+  - name: orders-db
+    type: storage.sqlite
+    config:
+      dbPath: /data/new.db
+`)
+
+	err := runDiff([]string{"-check-breaking", oldPath, newPath})
+	if err == nil {
+		t.Fatal("expected error from -check-breaking with breaking changes")
+	}
+	if !strings.Contains(err.Error(), "breaking change") {
+		t.Errorf("expected breaking change error, got: %v", err)
+	}
+}
+
+func TestRunDiffNoBreakingChanges(t *testing.T) {
+	dir := t.TempDir()
+	oldPath := filepath.Join(dir, "old.yaml")
+	newPath := filepath.Join(dir, "new.yaml")
+
+	writeTestConfigFile(t, oldPath, simpleConfig())
+	writeTestConfigFile(t, newPath, simpleConfig())
+
+	// With -check-breaking and no breaking changes, should succeed.
+ if err := runDiff([]string{"-check-breaking", oldPath, newPath}); err != nil { + t.Fatalf("expected no error when no breaking changes, got: %v", err) + } +} + +func TestRunDiffMissingStateFile(t *testing.T) { + dir := t.TempDir() + oldPath := filepath.Join(dir, "old.yaml") + newPath := filepath.Join(dir, "new.yaml") + writeTestConfigFile(t, oldPath, simpleConfig()) + writeTestConfigFile(t, newPath, simpleConfig()) + + err := runDiff([]string{"-state", "/nonexistent/state.json", oldPath, newPath}) + if err == nil { + t.Fatal("expected error for missing state file") + } +} + +// --- describePipelineTrigger --- + +func TestDescribePipelineTrigger(t *testing.T) { + p := httpPipeline("POST", "/api/v1/orders", 2) + got := describePipelineTrigger(p) + if !strings.Contains(got, "http") { + t.Errorf("expected 'http' in trigger description, got: %s", got) + } + if !strings.Contains(got, "POST") { + t.Errorf("expected 'POST' in trigger description, got: %s", got) + } + if !strings.Contains(got, "/api/v1/orders") { + t.Errorf("expected path in trigger description, got: %s", got) + } +} + +func TestDescribePipelineTriggerNil(t *testing.T) { + got := describePipelineTrigger(nil) + if got == "" { + t.Error("expected non-empty description for nil pipeline") + } +} + +// --- JSON roundtrip for DiffResult --- + +func TestDiffResultJSONRoundtrip(t *testing.T) { + result := DiffResult{ + OldConfig: "old.yaml", + NewConfig: "new.yaml", + Modules: []ModuleDiff{ + {Name: "server", Status: DiffStatusUnchanged, Type: "http.server"}, + }, + Pipelines: []PipelineDiff{ + {Name: "create-order", Status: DiffStatusAdded, Trigger: "http POST /orders"}, + }, + } + + data, err := json.Marshal(result) + if err != nil { + t.Fatalf("marshal DiffResult: %v", err) + } + + var decoded DiffResult + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("unmarshal DiffResult: %v", err) + } + + if decoded.Modules[0].Name != "server" { + t.Errorf("expected server module, got %q", 
decoded.Modules[0].Name) + } + if decoded.Pipelines[0].Status != DiffStatusAdded { + t.Errorf("expected added pipeline, got %q", decoded.Pipelines[0].Status) + } +} + +// --- Helpers --- + +func cfgWithModules(mods ...config.ModuleConfig) *config.WorkflowConfig { + return &config.WorkflowConfig{ + Modules: mods, + Pipelines: map[string]any{}, + Workflows: map[string]any{}, + Triggers: map[string]any{}, + } +} + +func cfgWithPipelines(pairs ...any) *config.WorkflowConfig { + pipelines := map[string]any{} + for i := 0; i+1 < len(pairs); i += 2 { + name, _ := pairs[i].(string) + val := pairs[i+1] + pipelines[name] = val + } + return &config.WorkflowConfig{ + Modules: []config.ModuleConfig{}, + Pipelines: pipelines, + Workflows: map[string]any{}, + Triggers: map[string]any{}, + } +} + +// httpPipeline builds a raw pipeline map with an HTTP trigger and n stub steps. +func httpPipeline(method, path string, n int) map[string]any { + steps := make([]any, n) + for i := range steps { + steps[i] = map[string]any{"name": "step", "type": "step.noop"} + } + return map[string]any{ + "trigger": map[string]any{ + "type": "http", + "config": map[string]any{ + "method": method, + "path": path, + }, + }, + "steps": steps, + } +} + +func findModuleDiff(t *testing.T, result DiffResult, name string) ModuleDiff { + t.Helper() + for _, m := range result.Modules { + if m.Name == name { + return m + } + } + t.Fatalf("module %q not found in diff result", name) + return ModuleDiff{} +} + +func findPipelineDiff(t *testing.T, result DiffResult, name string) PipelineDiff { + t.Helper() + for _, p := range result.Pipelines { + if p.Name == name { + return p + } + } + t.Fatalf("pipeline %q not found in diff result", name) + return PipelineDiff{} +} + +func writeTestConfigFile(t *testing.T, path, content string) { + t.Helper() + if err := os.WriteFile(path, []byte(content), 0644); err != nil { + t.Fatalf("write test config %q: %v", path, err) + } +} + +func simpleConfig() string { + return ` 
+modules: + - name: server + type: http.server + config: + address: ":8080" +` +} diff --git a/cmd/wfctl/main.go b/cmd/wfctl/main.go index e538617d..563e497f 100644 --- a/cmd/wfctl/main.go +++ b/cmd/wfctl/main.go @@ -22,6 +22,7 @@ var commands = map[string]func([]string) error{ "publish": runPublish, "deploy": runDeploy, "api": runAPI, + "diff": runDiff, } func usage() { @@ -45,6 +46,7 @@ Commands: publish Prepare and publish a plugin manifest to the workflow-registry deploy Deploy the workflow application (docker, kubernetes, cloud) api API tooling (extract: generate OpenAPI 3.0 spec from config) + diff Compare two workflow config files and show what changed Run 'wfctl -h' for command-specific help. `, version) diff --git a/cmd/wfctl/resource_correlation.go b/cmd/wfctl/resource_correlation.go new file mode 100644 index 00000000..78166044 --- /dev/null +++ b/cmd/wfctl/resource_correlation.go @@ -0,0 +1,173 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/GoCodeAlone/workflow/config" +) + +// ResourceKind categorizes a module's infrastructure resource type. +type ResourceKind string + +const ( + ResourceKindDatabase ResourceKind = "database" + ResourceKindBroker ResourceKind = "broker" + ResourceKindCache ResourceKind = "cache" + ResourceKindVolume ResourceKind = "volume" + ResourceKindStateless ResourceKind = "stateless" +) + +// ClassifyModule maps a workflow module type string to a ResourceKind. +func ClassifyModule(moduleType string) ResourceKind { + switch moduleType { + case "storage.sqlite", "database.workflow", "persistence.store": + return ResourceKindDatabase + case "messaging.broker", "messaging.nats", "messaging.kafka", "messaging.broker.eventbus": + return ResourceKindBroker + case "cache.redis": + return ResourceKindCache + case "static.fileserver": + return ResourceKindVolume + default: + return ResourceKindStateless + } +} + +// IsStateful returns true if the module type manages persistent state that +// must survive redeployments. 
+func IsStateful(moduleType string) bool {
+	switch ClassifyModule(moduleType) {
+	case ResourceKindDatabase, ResourceKindBroker, ResourceKindVolume:
+		return true
+	case ResourceKindCache:
+		// Redis is semi-stateful: ephemeral by default but can be persistent.
+		return false
+	default:
+		return false
+	}
+}
+
+// GenerateResourceID produces a deterministic resource identifier that links
+// a workflow module to the infrastructure resource it owns.
+//
+// Format: <kind>/<namespace>-<moduleName>, or <kind>/<moduleName> when namespace is empty.
+// e.g. database/prod-orders-db
+func GenerateResourceID(moduleName, moduleType, namespace string) string {
+	kind := ClassifyModule(moduleType)
+	if namespace == "" {
+		return fmt.Sprintf("%s/%s", kind, moduleName)
+	}
+	return fmt.Sprintf("%s/%s-%s", kind, namespace, moduleName)
+}
+
+// BreakingChange describes a single breaking change detected when comparing
+// two module configurations.
+type BreakingChange struct {
+	// Field is the config key that changed (empty if the whole module changed).
+	Field string
+	// OldValue is the previous value as a string representation.
+	OldValue string
+	// NewValue is the new value as a string representation.
+	NewValue string
+	// Message is a human-readable description.
+	Message string
+}
+
+// DetectBreakingChanges compares old and new ModuleConfig instances and
+// returns a list of changes that could cause data loss or service disruption.
+// Only meaningful for stateful module types; callers should check IsStateful
+// before acting on an empty result.
+func DetectBreakingChanges(oldMod, newMod *config.ModuleConfig) []BreakingChange {
+	if oldMod == nil || newMod == nil {
+		return nil
+	}
+
+	var changes []BreakingChange
+
+	// Type change is always breaking.
+ if oldMod.Type != newMod.Type { + changes = append(changes, BreakingChange{ + Field: "type", + OldValue: oldMod.Type, + NewValue: newMod.Type, + Message: fmt.Sprintf("module type changed from %q to %q", oldMod.Type, newMod.Type), + }) + // Don't bother comparing config if the type changed entirely. + return changes + } + + // For stateful modules, flag any config key changes that could affect data + // location or connectivity. + if !IsStateful(oldMod.Type) { + return nil + } + + breakingKeys := statefulBreakingKeys(oldMod.Type) + for _, key := range breakingKeys { + oldVal := configValueStr(oldMod.Config, key) + newVal := configValueStr(newMod.Config, key) + if oldVal != newVal { + changes = append(changes, BreakingChange{ + Field: key, + OldValue: oldVal, + NewValue: newVal, + Message: fmt.Sprintf("config key %q changed: %s → %s", + key, describeValue(oldVal), describeValue(newVal)), + }) + } + } + + return changes +} + +// statefulBreakingKeys returns the config keys that are breaking for a given +// stateful module type when changed. +func statefulBreakingKeys(moduleType string) []string { + switch moduleType { + case "storage.sqlite": + return []string{"dbPath", "path"} + case "database.workflow": + return []string{"dsn", "driver", "host", "port", "database", "dbname"} + case "persistence.store": + return []string{"database"} + case "messaging.broker", "messaging.broker.eventbus": + return []string{"persistence", "dataDir"} + case "messaging.nats": + return []string{"url", "clusterID"} + case "messaging.kafka": + return []string{"brokers", "topic"} + case "static.fileserver": + return []string{"rootDir", "dir"} + default: + return nil + } +} + +// configValueStr returns the string representation of a config map value for +// the given key, or an empty string if missing. 
+func configValueStr(cfg map[string]any, key string) string {
+	if cfg == nil {
+		return ""
+	}
+	v, ok := cfg[key]
+	if !ok {
+		return ""
+	}
+	return fmt.Sprintf("%v", v)
+}
+
+// describeValue renders a config value for display, showing "(unset)" for
+// empty strings.
+func describeValue(v string) string {
+	if v == "" {
+		return "(unset)"
+	}
+	return v
+}
+
+// moduleTypeLabel returns a display label for a module type; currently it
+// only trims surrounding whitespace.
+func moduleTypeLabel(moduleType string) string {
+	return strings.TrimSpace(moduleType)
+}
diff --git a/cmd/wfctl/resource_correlation_test.go b/cmd/wfctl/resource_correlation_test.go
new file mode 100644
index 00000000..fa72e5db
--- /dev/null
+++ b/cmd/wfctl/resource_correlation_test.go
@@ -0,0 +1,208 @@
+package main
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/GoCodeAlone/workflow/config"
+)
+
+// --- ClassifyModule ---
+
+func TestClassifyModuleDatabase(t *testing.T) {
+	for _, ty := range []string{"storage.sqlite", "database.workflow", "persistence.store"} {
+		if got := ClassifyModule(ty); got != ResourceKindDatabase {
+			t.Errorf("ClassifyModule(%q) = %q, want %q", ty, got, ResourceKindDatabase)
+		}
+	}
+}
+
+func TestClassifyModuleBroker(t *testing.T) {
+	for _, ty := range []string{"messaging.broker", "messaging.nats", "messaging.kafka", "messaging.broker.eventbus"} {
+		if got := ClassifyModule(ty); got != ResourceKindBroker {
+			t.Errorf("ClassifyModule(%q) = %q, want %q", ty, got, ResourceKindBroker)
+		}
+	}
+}
+
+func TestClassifyModuleCache(t *testing.T) {
+	if got := ClassifyModule("cache.redis"); got != ResourceKindCache {
+		t.Errorf("ClassifyModule(cache.redis) = %q, want %q", got, ResourceKindCache)
+	}
+}
+
+func TestClassifyModuleVolume(t *testing.T) {
+	if got := ClassifyModule("static.fileserver"); got != ResourceKindVolume {
+		t.Errorf("ClassifyModule(static.fileserver) = %q, want %q", got, ResourceKindVolume)
+	}
+}
+
+func 
TestClassifyModuleStateless(t *testing.T) { + for _, ty := range []string{ + "http.server", + "http.router", + "auth.jwt", + "openapi", + "observability.prometheus", + "http.middleware.cors", + "unknown.type", + "", + } { + if got := ClassifyModule(ty); got != ResourceKindStateless { + t.Errorf("ClassifyModule(%q) = %q, want %q", ty, got, ResourceKindStateless) + } + } +} + +// --- IsStateful --- + +func TestIsStatefulTrue(t *testing.T) { + for _, ty := range []string{ + "storage.sqlite", + "database.workflow", + "persistence.store", + "messaging.broker", + "messaging.nats", + "messaging.kafka", + "static.fileserver", + } { + if !IsStateful(ty) { + t.Errorf("IsStateful(%q) = false, want true", ty) + } + } +} + +func TestIsStatefulFalse(t *testing.T) { + for _, ty := range []string{ + "http.server", + "http.router", + "auth.jwt", + "cache.redis", // semi-stateful but ephemeral by default + "openapi", + "observability.prometheus", + "", + } { + if IsStateful(ty) { + t.Errorf("IsStateful(%q) = true, want false", ty) + } + } +} + +// --- GenerateResourceID --- + +func TestGenerateResourceIDWithNamespace(t *testing.T) { + id := GenerateResourceID("orders-db", "storage.sqlite", "prod") + if id != "database/prod-orders-db" { + t.Errorf("GenerateResourceID = %q, want %q", id, "database/prod-orders-db") + } +} + +func TestGenerateResourceIDWithoutNamespace(t *testing.T) { + id := GenerateResourceID("event-broker", "messaging.broker", "") + if id != "broker/event-broker" { + t.Errorf("GenerateResourceID = %q, want %q", id, "broker/event-broker") + } +} + +func TestGenerateResourceIDCache(t *testing.T) { + id := GenerateResourceID("session-cache", "cache.redis", "staging") + if id != "cache/staging-session-cache" { + t.Errorf("GenerateResourceID = %q, want %q", id, "cache/staging-session-cache") + } +} + +// --- DetectBreakingChanges --- + +func TestDetectBreakingChangesNilInputs(t *testing.T) { + if changes := DetectBreakingChanges(nil, nil); len(changes) != 0 { + 
t.Errorf("expected no changes for nil inputs, got %d", len(changes)) + } + mod := &config.ModuleConfig{Name: "x", Type: "storage.sqlite"} + if changes := DetectBreakingChanges(nil, mod); len(changes) != 0 { + t.Errorf("expected no changes when old is nil, got %d", len(changes)) + } + if changes := DetectBreakingChanges(mod, nil); len(changes) != 0 { + t.Errorf("expected no changes when new is nil, got %d", len(changes)) + } +} + +func TestDetectBreakingChangesTypeChanged(t *testing.T) { + old := &config.ModuleConfig{Name: "db", Type: "storage.sqlite"} + nw := &config.ModuleConfig{Name: "db", Type: "database.workflow"} + changes := DetectBreakingChanges(old, nw) + if len(changes) != 1 { + t.Fatalf("expected 1 change for type switch, got %d", len(changes)) + } + if changes[0].Field != "type" { + t.Errorf("expected field=type, got %q", changes[0].Field) + } +} + +func TestDetectBreakingChangesStatefulConfigChanged(t *testing.T) { + old := &config.ModuleConfig{ + Name: "orders-db", + Type: "storage.sqlite", + Config: map[string]any{"dbPath": "/data/old.db"}, + } + nw := &config.ModuleConfig{ + Name: "orders-db", + Type: "storage.sqlite", + Config: map[string]any{"dbPath": "/data/new.db"}, + } + changes := DetectBreakingChanges(old, nw) + if len(changes) == 0 { + t.Fatal("expected breaking change for dbPath change") + } + if !strings.Contains(changes[0].Message, "dbPath") { + t.Errorf("expected message to mention dbPath, got: %s", changes[0].Message) + } +} + +func TestDetectBreakingChangesStatelessNoBreaking(t *testing.T) { + old := &config.ModuleConfig{ + Name: "server", + Type: "http.server", + Config: map[string]any{"address": ":8080"}, + } + nw := &config.ModuleConfig{ + Name: "server", + Type: "http.server", + Config: map[string]any{"address": ":9090"}, + } + changes := DetectBreakingChanges(old, nw) + if len(changes) != 0 { + t.Errorf("expected no breaking changes for stateless module, got %d", len(changes)) + } +} + +func TestDetectBreakingChangesUnchanged(t 
*testing.T) { + mod := &config.ModuleConfig{ + Name: "orders-db", + Type: "storage.sqlite", + Config: map[string]any{"dbPath": "/data/orders.db"}, + } + changes := DetectBreakingChanges(mod, mod) + if len(changes) != 0 { + t.Errorf("expected no changes for identical modules, got %d", len(changes)) + } +} + +func TestDetectBreakingChangesDatabaseWorkflow(t *testing.T) { + old := &config.ModuleConfig{ + Name: "main-db", + Type: "database.workflow", + Config: map[string]any{"dsn": "postgres://host1/db1"}, + } + nw := &config.ModuleConfig{ + Name: "main-db", + Type: "database.workflow", + Config: map[string]any{"dsn": "postgres://host2/db1"}, + } + changes := DetectBreakingChanges(old, nw) + if len(changes) == 0 { + t.Fatal("expected breaking change for DSN change") + } + if changes[0].Field != "dsn" { + t.Errorf("expected field=dsn, got %q", changes[0].Field) + } +}