From 3d2f034b400b317144483beb4b08110c2a0b7814 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 11 Mar 2026 22:51:10 +0000 Subject: [PATCH 1/6] Initial plan From bfc645a3a2131cd87827779002008e324bc45959 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 11 Mar 2026 23:10:37 +0000 Subject: [PATCH 2/6] fix: sync DOCUMENTATION.md with all registered module/step types and add CI coverage test - Add TestDocumentationCoverage test in plugins/all that fails if any registered module or step type is missing from DOCUMENTATION.md (prevents future drift) - Update module type tables: add Plugin column, add 35 previously undocumented module types (api.gateway, auth.m2m, auth.oauth2, auth.token-blacklist, security.field-protection, openapi, config.provider, database.partitioned, dlq.service, eventstore.service, featureflag.service, timeline.service, nosql.*, platform.kubernetes/ecs/dns/networking/..., argo.workflows, app.container, aws.codebuild, gitlab.webhook/client, cloud.account, security.scanner, policy.mock, storage.artifact, cache.redis, tracing.propagation, http.middleware.otel, etc.) 
- Mark removed types with migration notes (auth.modular, chimux.router, database.modular, eventlogger.modular, httpclient.modular, httpserver.modular, eventbus.modular) - Add 104 previously undocumented step types across all plugins with Plugin column - Add Platform & Infrastructure Pipeline Steps section (47 new step types) - Add new module sections: NoSQL/Datastores, Event Sourcing & Messaging Services - Add detailed reference docs for openapi, auth.m2m, api.gateway, database.partitioned, config.provider, featureflag.service, dlq.service, eventstore.service, timeline.service Co-authored-by: intel352 <77607+intel352@users.noreply.github.com> --- DOCUMENTATION.md | 811 +++++++++++++++++++++++++------ plugins/all/doc_coverage_test.go | 75 +++ 2 files changed, 725 insertions(+), 161 deletions(-) create mode 100644 plugins/all/doc_coverage_test.go diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index ed165ed5..f0e6726e 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -2,7 +2,7 @@ ## Overview -The Workflow Engine is a configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 50+ built-in module types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. +The Workflow Engine is a configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 100+ built-in module types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. 
## Core Engine @@ -19,70 +19,96 @@ The engine is built on the [CrisisTextLine/modular](https://github.com/CrisisTex - `cmd/server` -- runs workflow configs as a server process - `cmd/wfctl` -- validates and inspects workflow configs offline -## Module Types (50+) +## Module Types (100+) -All modules are registered in `engine.go` and instantiated from YAML config. Organized by category: +All modules are instantiated from YAML config via the plugin factory registry. Organized by category. Each module type is provided by a plugin (see **Plugin** column); all are included when using `plugins/all`. + +> **Plugin tiers:** *Core* plugins are loaded by default. Plugin column shows which plugin package registers the type. ### HTTP & Routing -| Type | Description | -|------|-------------| -| `http.server` | Configurable web server | -| `http.router` | Request routing with path and method matching | -| `http.handler` | HTTP request processing with configurable responses | -| `http.proxy` | HTTP reverse proxy | -| `http.simple_proxy` | Simplified proxy configuration | -| `httpserver.modular` | Modular framework HTTP server integration | -| `httpclient.modular` | Modular framework HTTP client | -| `chimux.router` | Chi mux-based router | -| `reverseproxy` | Modular framework reverse proxy (v2) | -| `static.fileserver` | Static file serving | +| Type | Description | Plugin | +|------|-------------|--------| +| `http.server` | Configurable web server | http | +| `http.router` | Request routing with path and method matching | http | +| `http.handler` | HTTP request processing with configurable responses | http | +| `http.proxy` | HTTP reverse proxy | http | +| `http.simple_proxy` | Simplified proxy configuration | http | +| `reverseproxy` | Modular framework reverse proxy (v2) | http | +| `static.fileserver` | Static file serving | http | +| `openapi` | OpenAPI v3 spec-driven HTTP route generation with request/response validation and Swagger UI | openapi | + +> 
`httpserver.modular`, `httpclient.modular`, and `chimux.router` were removed in favour of `http.server`, `http.router`, and `reverseproxy`. ### Middleware -| Type | Description | -|------|-------------| -| `http.middleware.auth` | Authentication middleware | -| `http.middleware.cors` | CORS header management | -| `http.middleware.logging` | Request/response logging | -| `http.middleware.ratelimit` | Rate limiting | -| `http.middleware.requestid` | Request ID injection | -| `http.middleware.securityheaders` | Security header injection | +| Type | Description | Plugin | +|------|-------------|--------| +| `http.middleware.auth` | Authentication middleware | http | +| `http.middleware.cors` | CORS header management | http | +| `http.middleware.logging` | Request/response logging | http | +| `http.middleware.ratelimit` | Rate limiting | http | +| `http.middleware.requestid` | Request ID injection | http | +| `http.middleware.securityheaders` | Security header injection | http | +| `http.middleware.otel` | OpenTelemetry request tracing middleware | observability | ### Authentication -| Type | Description | -|------|-------------| -| `auth.jwt` | JWT authentication with seed users, persistence, token refresh | -| `auth.modular` | Modular framework auth integration | -| `auth.user-store` | User storage backend | +| Type | Description | Plugin | +|------|-------------|--------| +| `auth.jwt` | JWT authentication with seed users, persistence, token refresh | auth | +| `auth.user-store` | User storage backend | auth | +| `auth.oauth2` | OAuth2 authorization code flow (Google, GitHub, generic OIDC) | auth | +| `auth.m2m` | Machine-to-machine OAuth2: client_credentials grant, JWT-bearer, ES256/HS256, JWKS endpoint | auth | +| `auth.token-blacklist` | Token revocation blacklist backed by SQLite or in-memory store | auth | +| `security.field-protection` | Field-level encryption/decryption for sensitive data fields | auth | + +> `auth.modular` was removed in favour of `auth.jwt`. 
### API & CQRS -| Type | Description | -|------|-------------| -| `api.handler` | Generic REST resource handler | -| `api.command` | CQRS command handler with route pipelines | -| `api.query` | CQRS query handler with route pipelines | +| Type | Description | Plugin | +|------|-------------|--------| +| `api.handler` | Generic REST resource handler | api | +| `api.command` | CQRS command handler with route pipelines | api | +| `api.query` | CQRS query handler with route pipelines | api | +| `api.gateway` | Composable API gateway: routing, auth, rate limiting, CORS, and reverse proxying | api | +| `workflow.registry` | SQLite-backed registry for companies, organizations, projects, and workflows | api | +| `data.transformer` | Data transformation between formats using configurable pipelines | api | +| `processing.step` | Executes a component as a processing step in a workflow, with retry and compensation | api | ### State Machine -| Type | Description | -|------|-------------| -| `statemachine.engine` | State definitions, transitions, hooks, auto-transitions | -| `state.tracker` | State observation and tracking | -| `state.connector` | State machine interconnection | +| Type | Description | Plugin | +|------|-------------|--------| +| `statemachine.engine` | State definitions, transitions, hooks, auto-transitions | statemachine | +| `state.tracker` | State observation and tracking | statemachine | +| `state.connector` | State machine interconnection | statemachine | ### Messaging -| Type | Description | -|------|-------------| -| `messaging.broker` | In-memory message broker | -| `messaging.broker.eventbus` | EventBus-backed message broker | -| `messaging.handler` | Message processing handler | -| `messaging.kafka` | Apache Kafka broker integration | -| `messaging.nats` | NATS broker integration | +| Type | Description | Plugin | +|------|-------------|--------| +| `messaging.broker` | In-memory message broker | messaging | +| `messaging.broker.eventbus` | 
EventBus-backed message broker | messaging | +| `messaging.handler` | Message processing handler | messaging | +| `messaging.kafka` | Apache Kafka broker integration | messaging | +| `messaging.nats` | NATS broker integration | messaging | +| `notification.slack` | Slack notification sender | messaging | +| `webhook.sender` | Outbound webhook delivery with retry and dead letter | messaging | + +> `eventbus.modular` was removed in favour of `messaging.broker.eventbus`. ### Database & Persistence -| Type | Description | -|------|-------------| -| `database.modular` | Modular framework database integration | -| `database.workflow` | Workflow-specific database (SQLite + PostgreSQL) | -| `persistence.store` | Write-through persistence (SQLite/PostgreSQL) | +| Type | Description | Plugin | +|------|-------------|--------| +| `database.workflow` | Workflow-specific database (SQLite + PostgreSQL) | storage | +| `persistence.store` | Write-through persistence (SQLite/PostgreSQL) | storage | +| `database.partitioned` | PostgreSQL partitioned database for multi-tenant data isolation (LIST/RANGE partitions) | storage | + +> `database.modular` was removed in favour of `database.workflow`. 
+ +### NoSQL / Datastores +| Type | Description | Plugin | +|------|-------------|--------| +| `nosql.memory` | In-memory key-value NoSQL store for development and testing | datastores | +| `nosql.dynamodb` | AWS DynamoDB NoSQL store | datastores | +| `nosql.mongodb` | MongoDB document store | datastores | +| `nosql.redis` | Redis key-value store | datastores | ### Pipeline Steps @@ -107,70 +133,178 @@ flowchart TD style I fill:#e8f5e9,stroke:#388e3c ``` -| Type | Description | -|------|-------------| -| `processing.step` | Configurable processing step | -| `step.validate` | Validates pipeline data against required fields or JSON schema | -| `step.transform` | Transforms data shape and field mapping | -| `step.conditional` | Conditional branching based on field values | -| `step.set` | Sets values in pipeline context with template support | -| `step.log` | Logs pipeline data for debugging | -| `step.publish` | Publishes events to EventBus | -| `step.event_publish` | Publishes events to EventBus with full envelope control | -| `step.http_call` | Makes outbound HTTP requests | -| `step.delegate` | Delegates to a named service | -| `step.request_parse` | Extracts path params, query params, and request body from HTTP requests | -| `step.db_query` | Executes parameterized SQL SELECT queries against a named database | -| `step.db_exec` | Executes parameterized SQL INSERT/UPDATE/DELETE against a named database | -| `step.db_query_cached` | Executes a cached SQL SELECT query | -| `step.db_create_partition` | Creates a time-based table partition | -| `step.db_sync_partitions` | Ensures future partitions exist for a partitioned table | -| `step.json_response` | Writes HTTP JSON response with custom status code and headers | -| `step.raw_response` | Writes a raw HTTP response with arbitrary content type | -| `step.static_file` | Serves a pre-loaded file from disk as an HTTP response | -| `step.workflow_call` | Invokes another workflow pipeline by name | -| 
`step.validate_path_param` | Validates a URL path parameter against a set of rules | -| `step.validate_pagination` | Validates and normalizes pagination query params | -| `step.validate_request_body` | Validates request body against a JSON schema | -| `step.foreach` | Iterates over a slice and runs sub-steps per element. Optional `concurrency: N` for parallel processing | -| `step.parallel` | Executes named sub-steps concurrently and collects results. O(max(branch)) time | -| `step.webhook_verify` | Verifies an inbound webhook signature | -| `step.base64_decode` | Decodes a base64-encoded field | -| `step.cache_get` | Reads a value from the cache module | -| `step.cache_set` | Writes a value to the cache module | -| `step.cache_delete` | Deletes a value from the cache module | -| `step.ui_scaffold` | Generates UI scaffolding from a workflow config | -| `step.ui_scaffold_analyze` | Analyzes UI scaffold state for a workflow | -| `step.dlq_send` | Sends a message to the dead-letter queue | -| `step.dlq_replay` | Replays messages from the dead-letter queue | -| `step.retry_with_backoff` | Retries a sub-pipeline with exponential backoff | -| `step.resilient_circuit_breaker` | Wraps a sub-pipeline with a circuit breaker | -| `step.s3_upload` | Uploads a file or data to an S3-compatible bucket | -| `step.auth_validate` | Validates an authentication token and populates claims | -| `step.token_revoke` | Revokes an auth token | -| `step.field_reencrypt` | Re-encrypts a field with a new key | -| `step.sandbox_exec` | Executes a command inside a sandboxed container | -| `step.http_proxy` | Proxies an HTTP request to an upstream service | -| `step.hash` | Computes a cryptographic hash (md5/sha256/sha512) of a template-resolved input | -| `step.regex_match` | Matches a regular expression against a template-resolved input | -| `step.jq` | Applies a JQ expression to pipeline data for complex transformations | -| `step.ai_complete` | AI text completion using a configured provider | 
-| `step.ai_classify` | AI text classification into named categories | -| `step.ai_extract` | AI structured data extraction using tool use or prompt-based parsing | -| `step.actor_send` | Sends a fire-and-forget message to an actor pool (Tell) | -| `step.actor_ask` | Sends a request-response message to an actor and returns the response (Ask) | +| Type | Description | Plugin | +|------|-------------|--------| +| `processing.step` | Configurable processing step | api | +| `step.validate` | Validates pipeline data against required fields or JSON schema | pipelinesteps | +| `step.transform` | Transforms data shape and field mapping | pipelinesteps | +| `step.conditional` | Conditional branching based on field values | pipelinesteps | +| `step.set` | Sets values in pipeline context with template support | pipelinesteps | +| `step.log` | Logs pipeline data for debugging | pipelinesteps | +| `step.publish` | Publishes events to EventBus | pipelinesteps | +| `step.event_publish` | Publishes events to EventBus with full envelope control | pipelinesteps | +| `step.http_call` | Makes outbound HTTP requests | pipelinesteps | +| `step.delegate` | Delegates to a named service | pipelinesteps | +| `step.request_parse` | Extracts path params, query params, and request body from HTTP requests | pipelinesteps | +| `step.db_query` | Executes parameterized SQL SELECT queries against a named database | pipelinesteps | +| `step.db_exec` | Executes parameterized SQL INSERT/UPDATE/DELETE against a named database | pipelinesteps | +| `step.db_query_cached` | Executes a cached SQL SELECT query | pipelinesteps | +| `step.db_create_partition` | Creates a time-based table partition | pipelinesteps | +| `step.db_sync_partitions` | Ensures future partitions exist for a partitioned table | pipelinesteps | +| `step.json_response` | Writes HTTP JSON response with custom status code and headers | pipelinesteps | +| `step.raw_response` | Writes a raw HTTP response with arbitrary content type | 
pipelinesteps | +| `step.static_file` | Serves a pre-loaded file from disk as an HTTP response | pipelinesteps | +| `step.workflow_call` | Invokes another workflow pipeline by name | pipelinesteps | +| `step.sub_workflow` | Executes a named sub-workflow inline and merges its output | ai | +| `step.validate_path_param` | Validates a URL path parameter against a set of rules | pipelinesteps | +| `step.validate_pagination` | Validates and normalizes pagination query params | pipelinesteps | +| `step.validate_request_body` | Validates request body against a JSON schema | pipelinesteps | +| `step.foreach` | Iterates over a slice and runs sub-steps per element. Optional `concurrency: N` for parallel processing | pipelinesteps | +| `step.parallel` | Executes named sub-steps concurrently and collects results. O(max(branch)) time | pipelinesteps | +| `step.webhook_verify` | Verifies an inbound webhook signature | pipelinesteps | +| `step.base64_decode` | Decodes a base64-encoded field | pipelinesteps | +| `step.cache_get` | Reads a value from the cache module | pipelinesteps | +| `step.cache_set` | Writes a value to the cache module | pipelinesteps | +| `step.cache_delete` | Deletes a value from the cache module | pipelinesteps | +| `step.ui_scaffold` | Generates UI scaffolding from a workflow config | pipelinesteps | +| `step.ui_scaffold_analyze` | Analyzes UI scaffold state for a workflow | pipelinesteps | +| `step.dlq_send` | Sends a message to the dead-letter queue | pipelinesteps | +| `step.dlq_replay` | Replays messages from the dead-letter queue | pipelinesteps | +| `step.retry_with_backoff` | Retries a sub-pipeline with exponential backoff | pipelinesteps | +| `step.resilient_circuit_breaker` | Wraps a sub-pipeline with a circuit breaker | pipelinesteps | +| `step.s3_upload` | Uploads a file or data to an S3-compatible bucket | pipelinesteps | +| `step.auth_validate` | Validates an authentication token and populates claims | pipelinesteps | +| `step.token_revoke` | 
Revokes an auth token | pipelinesteps | +| `step.field_reencrypt` | Re-encrypts a field with a new key | pipelinesteps | +| `step.sandbox_exec` | Executes a command inside a sandboxed container | pipelinesteps | +| `step.http_proxy` | Proxies an HTTP request to an upstream service | pipelinesteps | +| `step.hash` | Computes a cryptographic hash (md5/sha256/sha512) of a template-resolved input | pipelinesteps | +| `step.regex_match` | Matches a regular expression against a template-resolved input | pipelinesteps | +| `step.jq` | Applies a JQ expression to pipeline data for complex transformations | pipelinesteps | +| `step.ai_complete` | AI text completion using a configured provider | ai | +| `step.ai_classify` | AI text classification into named categories | ai | +| `step.ai_extract` | AI structured data extraction using tool use or prompt-based parsing | ai | +| `step.actor_send` | Sends a fire-and-forget message to an actor pool (Tell) | actors | +| `step.actor_ask` | Sends a request-response message to an actor and returns the response (Ask) | actors | +| `step.rate_limit` | Applies per-client or global rate limiting to a pipeline step | http | +| `step.circuit_breaker` | Wraps a sub-pipeline with a circuit breaker (open/half-open/closed) | http | +| `step.feature_flag` | Evaluates a feature flag and branches based on the result | featureflags | +| `step.ff_gate` | Blocks execution unless a named feature flag is enabled | featureflags | +| `step.authz_check` | Evaluates an authorization policy (OPA, Casbin, or mock) for the current request | policy | +| `step.cli_invoke` | Invokes a registered CLI command by name | scheduler | +| `step.cli_print` | Prints output to stdout (used in CLI workflows) | scheduler | +| `step.statemachine_transition` | Triggers a state machine transition for the given entity | statemachine | +| `step.statemachine_get` | Retrieves the current state and metadata for a state machine entity | statemachine | +| `step.nosql_get` | Reads a 
document from a NoSQL store by key | datastores | +| `step.nosql_put` | Writes a document to a NoSQL store | datastores | +| `step.nosql_delete` | Deletes a document from a NoSQL store by key | datastores | +| `step.nosql_query` | Queries a NoSQL store with filter expressions | datastores | +| `step.artifact_upload` | Uploads a file to the artifact store | storage | +| `step.artifact_download` | Downloads a file from the artifact store | storage | +| `step.artifact_list` | Lists artifacts in the store for a given prefix | storage | +| `step.artifact_delete` | Deletes an artifact from the store | storage | +| `step.secret_rotate` | Rotates a secret in the configured secrets backend | secrets | +| `step.cloud_validate` | Validates cloud account credentials and configuration | cloud | +| `step.trace_start` | Starts an OpenTelemetry trace span for the current pipeline | observability | +| `step.trace_inject` | Injects trace context headers into outgoing request metadata | observability | +| `step.trace_extract` | Extracts trace context from incoming request headers | observability | +| `step.trace_annotate` | Adds key/value annotations to the current trace span | observability | +| `step.trace_link` | Links the current span to an external span by trace/span ID | observability | +| `step.gitlab_trigger_pipeline` | Triggers a GitLab CI/CD pipeline via the GitLab API | gitlab | +| `step.gitlab_pipeline_status` | Polls a GitLab pipeline until it reaches a terminal state | gitlab | +| `step.gitlab_create_mr` | Creates a GitLab merge request | gitlab | +| `step.gitlab_mr_comment` | Adds a comment to a GitLab merge request | gitlab | +| `step.gitlab_parse_webhook` | Parses and validates an inbound GitLab webhook payload | gitlab | +| `step.policy_evaluate` | Evaluates a named policy with the given input and returns allow/deny | policy | +| `step.policy_load` | Loads a policy definition into the policy engine at runtime | policy | +| `step.policy_list` | Lists all loaded 
policies in the policy engine | policy | +| `step.policy_test` | Runs a policy against test cases and reports pass/fail | policy | +| `step.marketplace_search` | Searches the plugin marketplace for available extensions | marketplace | +| `step.marketplace_detail` | Fetches detail information for a marketplace plugin | marketplace | +| `step.marketplace_install` | Installs a plugin from the marketplace | marketplace | +| `step.marketplace_installed` | Lists installed marketplace plugins | marketplace | +| `step.marketplace_uninstall` | Uninstalls a marketplace plugin | marketplace | +| `step.marketplace_update` | Updates a marketplace plugin to the latest version | marketplace | ### CI/CD Pipeline Steps -| Type | Description | -|------|-------------| -| `step.docker_build` | Builds a Docker image from a context directory and Dockerfile | -| `step.docker_push` | Pushes a Docker image to a remote registry | -| `step.docker_run` | Runs a command inside a Docker container via sandbox | -| `step.scan_sast` | Static Application Security Testing (SAST) via configurable scanner | -| `step.scan_container` | Container image vulnerability scanning via Trivy | -| `step.scan_deps` | Dependency vulnerability scanning via Grype | -| `step.artifact_push` | Stores a file in the artifact store for cross-step sharing | -| `step.artifact_pull` | Retrieves an artifact from a prior execution, URL, or S3 | +| Type | Description | Plugin | +|------|-------------|--------| +| `step.docker_build` | Builds a Docker image from a context directory and Dockerfile | cicd | +| `step.docker_push` | Pushes a Docker image to a remote registry | cicd | +| `step.docker_run` | Runs a command inside a Docker container via sandbox | cicd | +| `step.scan_sast` | Static Application Security Testing (SAST) via configurable scanner | cicd | +| `step.scan_container` | Container image vulnerability scanning via Trivy | cicd | +| `step.scan_deps` | Dependency vulnerability scanning via Grype | cicd | +| 
`step.artifact_push` | Stores a file in the artifact store for cross-step sharing | cicd | +| `step.artifact_pull` | Retrieves an artifact from a prior execution, URL, or S3 | cicd | +| `step.shell_exec` | Executes an arbitrary shell command | cicd | +| `step.build_binary` | Builds a Go binary using `go build` | cicd | +| `step.build_from_config` | Builds the workflow server binary from a YAML config | cicd | +| `step.build_ui` | Builds the UI assets from a frontend config | cicd | +| `step.deploy` | Deploys a built artifact to an environment | cicd | +| `step.gate` | Manual approval gate — pauses pipeline until an external signal is received | cicd | +| `step.git_clone` | Clones a Git repository | cicd | +| `step.git_commit` | Commits staged changes in a local Git repository | cicd | +| `step.git_push` | Pushes commits to a remote Git repository | cicd | +| `step.git_tag` | Creates and optionally pushes a Git tag | cicd | +| `step.git_checkout` | Checks out a branch, tag, or commit in a local repository | cicd | +| `step.codebuild_create_project` | Creates an AWS CodeBuild project | cicd | +| `step.codebuild_start` | Starts an AWS CodeBuild build | cicd | +| `step.codebuild_status` | Polls an AWS CodeBuild build until completion | cicd | +| `step.codebuild_logs` | Fetches logs from an AWS CodeBuild build | cicd | +| `step.codebuild_list_builds` | Lists recent AWS CodeBuild builds for a project | cicd | +| `step.codebuild_delete_project` | Deletes an AWS CodeBuild project | cicd | + +### Platform & Infrastructure Pipeline Steps +| Type | Description | Plugin | +|------|-------------|--------| +| `step.platform_template` | Renders an infrastructure template (Terraform, Helm, etc.) 
with pipeline context variables | platform | +| `step.k8s_plan` | Generates a Kubernetes deployment plan (dry-run) | platform | +| `step.k8s_apply` | Applies a Kubernetes manifest or deployment config | platform | +| `step.k8s_status` | Retrieves the status of a Kubernetes workload | platform | +| `step.k8s_destroy` | Tears down a Kubernetes workload | platform | +| `step.ecs_plan` | Generates an ECS task/service deployment plan | platform | +| `step.ecs_apply` | Deploys a task or service to AWS ECS | platform | +| `step.ecs_status` | Retrieves the status of an ECS service | platform | +| `step.ecs_destroy` | Removes an ECS task or service | platform | +| `step.iac_plan` | Plans IaC changes (Terraform plan, Pulumi preview, etc.) | platform | +| `step.iac_apply` | Applies IaC changes | platform | +| `step.iac_status` | Retrieves the current state of an IaC stack | platform | +| `step.iac_destroy` | Destroys all resources in an IaC stack | platform | +| `step.iac_drift_detect` | Detects configuration drift between desired and actual state | platform | +| `step.dns_plan` | Plans DNS record changes | platform | +| `step.dns_apply` | Applies DNS record changes | platform | +| `step.dns_status` | Retrieves the current DNS records for a domain | platform | +| `step.network_plan` | Plans networking resource changes (VPC, subnets, etc.) 
| platform | +| `step.network_apply` | Applies networking resource changes | platform | +| `step.network_status` | Retrieves the status of networking resources | platform | +| `step.apigw_plan` | Plans API gateway configuration changes | platform | +| `step.apigw_apply` | Applies API gateway configuration changes | platform | +| `step.apigw_status` | Retrieves API gateway deployment status | platform | +| `step.apigw_destroy` | Removes an API gateway configuration | platform | +| `step.scaling_plan` | Plans auto-scaling policy changes | platform | +| `step.scaling_apply` | Applies auto-scaling policies | platform | +| `step.scaling_status` | Retrieves current auto-scaling state | platform | +| `step.scaling_destroy` | Removes auto-scaling policies | platform | +| `step.app_deploy` | Deploys a containerized application | platform | +| `step.app_status` | Retrieves deployment status of an application | platform | +| `step.app_rollback` | Rolls back an application to a previous deployment | platform | +| `step.region_deploy` | Deploys workloads to a specific cloud region | platform | +| `step.region_promote` | Promotes a deployment from staging to production across regions | platform | +| `step.region_failover` | Triggers a regional failover | platform | +| `step.region_status` | Retrieves health and routing status for a region | platform | +| `step.region_weight` | Adjusts traffic weight for a region in the router | platform | +| `step.region_sync` | Synchronizes configuration across regions | platform | +| `step.argo_submit` | Submits an Argo Workflow | platform | +| `step.argo_status` | Polls an Argo Workflow until completion | platform | +| `step.argo_logs` | Retrieves logs from an Argo Workflow | platform | +| `step.argo_delete` | Deletes an Argo Workflow | platform | +| `step.argo_list` | Lists Argo Workflows for a namespace | platform | +| `step.do_deploy` | Deploys to DigitalOcean App Platform | platform | +| `step.do_status` | Retrieves DigitalOcean App 
Platform deployment status | platform | +| `step.do_logs` | Fetches DigitalOcean App Platform runtime logs | platform | +| `step.do_scale` | Scales a DigitalOcean App Platform component | platform | +| `step.do_destroy` | Destroys a DigitalOcean App Platform deployment | platform | ### Template Functions @@ -279,74 +413,429 @@ value: '{{ index .steps "parse-request" "path_params" "id" }}' `wfctl template validate --config workflow.yaml` lints template expressions and warns on undefined step references, forward references, and suggests the `step` function for hyphenated names. ### Infrastructure -| Type | Description | -|------|-------------| -| `license.validator` | License key validation against a remote server with caching and grace period | -| `platform.provider` | Cloud infrastructure provider declaration (e.g., Terraform, Pulumi) | -| `platform.resource` | Infrastructure resource managed by a platform provider | -| `platform.context` | Execution context for platform operations (org, environment, tier) | -| `platform.do_app` | DigitalOcean App Platform deployment (deploy, scale, logs, destroy) | -| `platform.do_networking` | DigitalOcean VPC and firewall management | -| `platform.do_dns` | DigitalOcean domain and DNS record management | -| `platform.do_database` | DigitalOcean Managed Database (PostgreSQL, MySQL, Redis) | -| `iac.state` | IaC state persistence (memory, filesystem, or spaces/S3-compatible backends) | +| Type | Description | Plugin | +|------|-------------|--------| +| `license.validator` | License key validation against a remote server with caching and grace period | license | +| `platform.provider` | Cloud infrastructure provider declaration (e.g., Terraform, Pulumi) | platform | +| `platform.resource` | Infrastructure resource managed by a platform provider | platform | +| `platform.context` | Execution context for platform operations (org, environment, tier) | platform | +| `platform.kubernetes` | Kubernetes cluster deployment target | 
platform | +| `platform.ecs` | AWS ECS cluster deployment target | platform | +| `platform.dns` | DNS provider for managing records (Route53, CloudFlare, etc.) | platform | +| `platform.networking` | VPC and networking resource management | platform | +| `platform.apigateway` | API gateway resource management (AWS API GW, etc.) | platform | +| `platform.autoscaling` | Auto-scaling policy and target management | platform | +| `platform.region` | Multi-region deployment configuration | platform | +| `platform.region_router` | Routes traffic across regions by weight, latency, or failover | platform | +| `platform.doks` | DigitalOcean Kubernetes Service (DOKS) deployment | platform | +| `platform.do_app` | DigitalOcean App Platform deployment (deploy, scale, logs, destroy) | platform | +| `platform.do_networking` | DigitalOcean VPC and firewall management | platform | +| `platform.do_dns` | DigitalOcean domain and DNS record management | platform | +| `platform.do_database` | DigitalOcean Managed Database (PostgreSQL, MySQL, Redis) | platform | +| `iac.state` | IaC state persistence (memory, filesystem, or spaces/S3-compatible backends) | platform | +| `app.container` | Containerised application deployment descriptor | platform | +| `argo.workflows` | Argo Workflows integration for Kubernetes-native workflow orchestration | platform | +| `aws.codebuild` | AWS CodeBuild project and build management | cicd | ### Observability -| Type | Description | -|------|-------------| -| `metrics.collector` | Prometheus metrics collection and `/metrics` endpoint | -| `health.checker` | Health endpoints (`/healthz`, `/readyz`, `/livez`) | -| `log.collector` | Centralized log collection | -| `observability.otel` | OpenTelemetry tracing integration | -| `eventlogger.modular` | Modular framework event logger | +| Type | Description | Plugin | +|------|-------------|--------| +| `metrics.collector` | Prometheus metrics collection and `/metrics` endpoint | observability | +| 
`health.checker` | Health endpoints (`/healthz`, `/readyz`, `/livez`) | observability | +| `log.collector` | Centralized log collection | observability | +| `observability.otel` | OpenTelemetry tracing integration | observability | +| `openapi.generator` | OpenAPI spec generation from workflow config | observability | +| `tracing.propagation` | OpenTelemetry trace-context propagation module | observability | + +> `eventlogger.modular` was removed; use `log.collector` or structured slog logging instead. ### Storage -| Type | Description | -|------|-------------| -| `storage.s3` | Amazon S3 storage | -| `storage.gcs` | Google Cloud Storage | -| `storage.local` | Local filesystem storage | -| `storage.sqlite` | SQLite storage | +| Type | Description | Plugin | +|------|-------------|--------| +| `storage.s3` | Amazon S3 storage | storage | +| `storage.gcs` | Google Cloud Storage | storage | +| `storage.local` | Local filesystem storage | storage | +| `storage.sqlite` | SQLite storage | storage | +| `storage.artifact` | Artifact store for build artifacts shared across pipeline steps | storage | +| `cache.redis` | Redis-backed cache module | storage | ### Actor Model -| Type | Description | -|------|-------------| -| `actor.system` | goakt v4 actor system — manages actor lifecycle and fault recovery | -| `actor.pool` | Defines a group of actors with shared behavior, routing strategy, and recovery policy | +| Type | Description | Plugin | +|------|-------------|--------| +| `actor.system` | goakt v4 actor system — manages actor lifecycle and fault recovery | actors | +| `actor.pool` | Defines a group of actors with shared behavior, routing strategy, and recovery policy | actors | ### Scheduling -| Type | Description | -|------|-------------| -| `scheduler.modular` | Cron-based job scheduling | +| Type | Description | Plugin | +|------|-------------|--------| +| `scheduler.modular` | Cron-based job scheduling | modularcompat | ### Integration -| Type | Description | 
-|------|-------------| -| `webhook.sender` | Outbound webhook delivery with retry and dead letter | -| `notification.slack` | Slack notifications | -| `openapi.consumer` | OpenAPI spec consumer for external service integration | -| `openapi.generator` | OpenAPI spec generation from workflow config | +| Type | Description | Plugin | +|------|-------------|--------| +| `webhook.sender` | Outbound webhook delivery with retry and dead letter | messaging | +| `notification.slack` | Slack notifications | messaging | +| `openapi.consumer` | OpenAPI spec consumer for external service integration | observability | +| `gitlab.webhook` | GitLab webhook receiver and validator | gitlab | +| `gitlab.client` | GitLab API client (pipelines, MRs, repos) | gitlab | +| `cloud.account` | Cloud account credential holder (AWS, GCP, Azure) | cloud | +| `security.scanner` | Security scanning provider for SAST/container/dependency scans | scanner | +| `policy.mock` | In-memory mock policy engine for testing | policy | ### Secrets -| Type | Description | -|------|-------------| -| `secrets.vault` | HashiCorp Vault integration | -| `secrets.aws` | AWS Secrets Manager integration | +| Type | Description | Plugin | +|------|-------------|--------| +| `secrets.vault` | HashiCorp Vault integration | secrets | +| `secrets.aws` | AWS Secrets Manager integration | secrets | + +### Event Sourcing & Messaging Services +| Type | Description | Plugin | +|------|-------------|--------| +| `eventstore.service` | Append-only SQLite event store for execution history | eventstore | +| `dlq.service` | Dead-letter queue service for failed message management | dlq | +| `timeline.service` | Timeline and replay service for execution visualization | timeline | +| `featureflag.service` | Feature flag evaluation engine with SSE change streaming | featureflags | +| `config.provider` | Application configuration registry with schema validation, defaults, and source layering | configprovider | ### Other -| Type | 
Description | -|------|-------------| -| `cache.modular` | Modular framework cache | -| `jsonschema.modular` | JSON Schema validation | -| `eventbus.modular` | Modular framework EventBus | -| `dynamic.component` | Yaegi hot-reload Go component | -| `data.transformer` | Data transformation | -| `workflow.registry` | Workflow registration and discovery | +| Type | Description | Plugin | +|------|-------------|--------| +| `cache.modular` | Modular framework cache | modularcompat | +| `jsonschema.modular` | JSON Schema validation | modularcompat | +| `dynamic.component` | Yaegi hot-reload Go component | ai | + +> `eventbus.modular` was removed in favour of `messaging.broker.eventbus`. +> `data.transformer` and `workflow.registry` are provided by the `api` plugin (see API & CQRS section above). ## Module Type Reference Detailed configuration reference for module types not covered in the main table above. +--- + +### `openapi` + +Parses an OpenAPI v3 specification file and automatically generates HTTP routes, validates requests and responses against the spec, and optionally serves Swagger UI. Routes are mapped to named pipelines via the `x-pipeline` extension field in the spec. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `spec_file` | string | — | Path to the OpenAPI v3 YAML or JSON spec file (resolved relative to the config file directory). | +| `router` | string | — | Name of the `http.router` module to register routes on. | +| `base_path` | string | `""` | URL path prefix to strip before matching spec paths. | +| `max_body_bytes` | int | `1048576` | Maximum request body size (bytes). | +| `validation.request` | bool | `true` | Validate incoming request bodies, query params, and headers against the spec. | +| `validation.response` | bool | `false` | Validate outgoing response bodies against the spec. 
| +| `validation.response_action` | string | `"warn"` | Action on response validation failure: `"warn"` (log only) or `"error"` (return 500). | +| `swagger_ui` | bool | `false` | Serve Swagger UI at `/swagger/` (requires `spec_file`). | + +**Route mapping via `x-pipeline`:** + +```yaml +# In your OpenAPI spec: +paths: + /users/{id}: + get: + operationId: getUser + x-pipeline: get-user-pipeline +``` + +```yaml +# In your workflow config: +modules: + - name: api-spec + type: openapi + config: + spec_file: ./api/openapi.yaml + router: main-router + validation: + request: true + response: true + response_action: warn + swagger_ui: true +``` + +--- + +### `auth.m2m` + +Machine-to-machine (M2M) OAuth2 authentication module. Implements the `client_credentials` grant and `urn:ietf:params:oauth:grant-type:jwt-bearer` assertion grant. Issues signed JWTs (ES256 or HS256) and exposes a JWKS endpoint for token verification by third parties. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `algorithm` | string | `"ES256"` | JWT signing algorithm: `"ES256"` (ECDSA P-256) or `"HS256"` (symmetric HMAC). | +| `secret` | string | — | HMAC secret for HS256 (min 32 bytes). Leave empty when using ES256. | +| `privateKey` | string | — | PEM-encoded EC private key for ES256. If omitted, a key is auto-generated at startup. | +| `tokenExpiry` | duration | `"1h"` | Access token expiration duration (e.g., `"15m"`, `"1h"`). | +| `issuer` | string | `"workflow"` | Token `iss` claim. | +| `clients` | array | `[]` | Registered OAuth2 clients: `[{clientId, clientSecret, scopes, description, claims}]`. | +| `introspect` | object | — | Access-control policy for `POST /oauth/introspect`: `{allowOthers, requiredScope, requiredClaim, requiredClaimVal}`. Default: self-only. 
| + +**HTTP endpoints provided:** + +| Endpoint | Description | +|----------|-------------| +| `POST /oauth/token` | Issue access token (client_credentials or jwt-bearer grant) | +| `GET /oauth/jwks` | JWKS endpoint for public key distribution | +| `POST /oauth/introspect` | Token introspection | + +**Example:** + +```yaml +modules: + - name: m2m-auth + type: auth.m2m + config: + algorithm: ES256 + tokenExpiry: "1h" + issuer: "my-api" + clients: + - clientId: "service-a" + clientSecret: "${SERVICE_A_SECRET}" + scopes: ["read", "write"] + description: "Internal service A" +``` + +--- + +### `api.gateway` + +Composable API gateway that combines routing, authentication, rate limiting, CORS, and reverse proxying into a single module. Each route entry specifies a path prefix, backend service, and optional per-route overrides. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `routes` | array | yes | Route definitions (see below). | +| `globalRateLimit` | object | no | Global rate limit applied to all routes: `{requestsPerMinute, burstSize}`. | +| `cors` | object | no | CORS settings: `{allowOrigins, allowMethods, allowHeaders, maxAge}`. | +| `auth` | object | no | Default auth settings: `{type: bearer\|api_key\|basic, header}`. | + +**Route fields:** + +| Key | Type | Description | +|-----|------|-------------| +| `pathPrefix` | string | URL path prefix to match (e.g., `/api/v1/orders`). | +| `backend` | string | Backend service name or URL. | +| `stripPrefix` | bool | Strip the path prefix before forwarding. Default: `false`. | +| `auth` | bool | Require authentication for this route. | +| `timeout` | duration | Per-route timeout (e.g., `"30s"`). | +| `methods` | array | Allowed HTTP methods. Empty = all methods. | +| `rateLimit` | object | Per-route rate limit override: `{requestsPerMinute, burstSize}`. 
| + +**Example:** + +```yaml +modules: + - name: gateway + type: api.gateway + config: + globalRateLimit: + requestsPerMinute: 1000 + burstSize: 50 + cors: + allowOrigins: ["*"] + allowMethods: ["GET", "POST", "PUT", "DELETE"] + routes: + - pathPrefix: /api/v1/orders + backend: orders-service + auth: true + timeout: "30s" + - pathPrefix: /api/v1/public + backend: public-service + auth: false +``` + +--- + +### `database.partitioned` + +PostgreSQL partitioned database module for multi-tenant data isolation. Manages LIST or RANGE partition creation and synchronization against a source table of tenant IDs. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `driver` | string | yes | PostgreSQL driver: `"pgx"`, `"pgx/v5"`, or `"postgres"`. | +| `dsn` | string | yes | PostgreSQL connection string. | +| `partitionKey` | string | yes | Column used for partitioning (e.g., `"tenant_id"`). | +| `tables` | array | yes | Tables to manage partitions for. | +| `partitionType` | string | `"list"` | Partition type: `"list"` (FOR VALUES IN) or `"range"` (FOR VALUES FROM/TO). | +| `partitionNameFormat` | string | `"{table}_{tenant}"` | Template for partition table names. Supports `{table}` and `{tenant}` placeholders. | +| `sourceTable` | string | — | Table containing all tenant IDs for auto-partition sync (e.g., `"tenants"`). | +| `sourceColumn` | string | — | Column in source table to query for tenant values. Defaults to `partitionKey`. | +| `maxOpenConns` | int | `25` | Maximum open database connections. | +| `maxIdleConns` | int | `5` | Maximum idle connections in the pool. 
| + +**Example:** + +```yaml +modules: + - name: tenant-db + type: database.partitioned + config: + driver: pgx + dsn: "${DATABASE_URL}" + partitionKey: tenant_id + tables: + - orders + - events + - sessions + partitionType: list + partitionNameFormat: "{table}_{tenant}" + sourceTable: tenants + sourceColumn: id +``` + +--- + +### `config.provider` + +Application configuration registry with schema validation, default values, and source layering. Processes `config.provider` modules before all other modules so that `{{config "key"}}` references in the rest of the YAML are expanded at load time. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `schema` | array | no | Config key schema definitions: `[{key, type, default, required, description}]`. | +| `sources` | array | no | Value sources loaded in order (later sources override earlier): `[{type: env\|defaults, ...}]`. | + +**Template usage:** + +```yaml +# In any other module's config, reference config registry values: +config: + database_url: "{{config \"DATABASE_URL\"}}" + api_key: "{{config \"API_KEY\"}}" +``` + +**Example:** + +```yaml +modules: + - name: app-config + type: config.provider + config: + schema: + - key: DATABASE_URL + type: string + required: true + description: "PostgreSQL connection string" + - key: API_KEY + type: string + required: true + description: "External API key" + - key: CACHE_TTL + type: string + default: "5m" + description: "Cache entry TTL" + sources: + - type: env +``` + +--- + +### `featureflag.service` + +Feature flag evaluation engine with SQLite persistence and Server-Sent Events (SSE) change streaming. Flag values can be booleans, strings, JSON, or user-segment-based rollouts. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `provider` | string | `"sqlite"` | Storage provider: `"sqlite"` or `"memory"`. 
| +| `db_path` | string | `"data/featureflags.db"` | SQLite database path. | +| `cache_ttl` | duration | `"5m"` | How long to cache flag evaluations. | +| `sse_enabled` | bool | `false` | Enable SSE endpoint for real-time flag change streaming. | + +**Example:** + +```yaml +modules: + - name: flags + type: featureflag.service + config: + provider: sqlite + db_path: ./data/flags.db + cache_ttl: "1m" + sse_enabled: true +``` + +--- + +### `dlq.service` + +Dead-letter queue (DLQ) service for capturing, inspecting, and replaying failed messages. Backed by an in-memory or SQLite store with configurable retention. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `max_retries` | int | `3` | Maximum delivery attempts before a message is sent to the DLQ. | +| `retention_days` | int | `30` | Number of days to retain dead-lettered messages. | + +**Example:** + +```yaml +modules: + - name: dlq + type: dlq.service + config: + max_retries: 5 + retention_days: 7 +``` + +--- + +### `eventstore.service` + +Append-only event store backed by SQLite for recording execution history. Used by the timeline and replay services. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `db_path` | string | `"data/events.db"` | SQLite database path. | +| `retention_days` | int | `90` | Days to retain recorded events. | + +**Example:** + +```yaml +modules: + - name: event-store + type: eventstore.service + config: + db_path: ./data/events.db + retention_days: 30 +``` + +--- + +### `timeline.service` + +Provides an execution timeline service for step-by-step visualization of past pipeline runs. Reads events from a configured `eventstore.service` module. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `event_store` | string | `"admin-event-store"` | Name of the `eventstore.service` module to read from. 
| + +**Example:** + +```yaml +modules: + - name: timeline + type: timeline.service + config: + event_store: event-store +``` + +--- + ### Audit Logging (`audit/`) The `audit/` package provides a structured JSON audit logger for recording security-relevant events. It is used internally by the engine and admin platform -- not a YAML module type, but rather a Go library used by other modules. diff --git a/plugins/all/doc_coverage_test.go b/plugins/all/doc_coverage_test.go new file mode 100644 index 00000000..898e7485 --- /dev/null +++ b/plugins/all/doc_coverage_test.go @@ -0,0 +1,75 @@ +package all + +import ( + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" + + "github.com/GoCodeAlone/workflow/capability" + "github.com/GoCodeAlone/workflow/plugin" + "github.com/GoCodeAlone/workflow/schema" +) + +// TestDocumentationCoverage verifies that every registered module type and +// step type appears in DOCUMENTATION.md at least once (as a backtick-quoted +// string, e.g. `my.module`). This test is intended to catch drift between +// the plugin registrations and the public-facing documentation. +// +// If a new module or step type is added but the documentation is not updated, +// this test will fail with a list of the missing entries so they can be added +// to DOCUMENTATION.md. +func TestDocumentationCoverage(t *testing.T) { + // Locate DOCUMENTATION.md relative to this test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + t.Fatal("runtime.Caller failed") + } + docPath := filepath.Join(filepath.Dir(filename), "..", "..", "DOCUMENTATION.md") + + raw, err := os.ReadFile(docPath) //nolint:gosec // path constructed from known repo structure + if err != nil { + t.Fatalf("read DOCUMENTATION.md: %v", err) + } + docContent := string(raw) + + // Load all built-in plugins into a throwaway loader. 
+ capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) error: %v", p.Name(), err) + } + } + + // Collect module types missing from docs. + var missingModules []string + for typeName := range loader.ModuleFactories() { + if !strings.Contains(docContent, "`"+typeName+"`") { + missingModules = append(missingModules, typeName) + } + } + + // Collect step types missing from docs. + var missingSteps []string + for typeName := range loader.StepFactories() { + if !strings.Contains(docContent, "`"+typeName+"`") { + missingSteps = append(missingSteps, typeName) + } + } + + if len(missingModules) > 0 { + sort.Strings(missingModules) + t.Errorf("module types registered but not documented in DOCUMENTATION.md (%d missing):\n %s\n\nAdd a row for each type to the appropriate section of DOCUMENTATION.md.", + len(missingModules), strings.Join(missingModules, "\n ")) + } + + if len(missingSteps) > 0 { + sort.Strings(missingSteps) + t.Errorf("step types registered but not documented in DOCUMENTATION.md (%d missing):\n %s\n\nAdd a row for each type to the appropriate section of DOCUMENTATION.md.", + len(missingSteps), strings.Join(missingSteps, "\n ")) + } +} From c17110d277a4eecab3a78b5d44a0fc70273c88a8 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 11 Mar 2026 23:14:43 +0000 Subject: [PATCH 3/6] fix: address code review - fix count accuracy and standardize to US English spelling Co-authored-by: intel352 <77607+intel352@users.noreply.github.com> --- DOCUMENTATION.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index f0e6726e..c4f315e8 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -2,7 +2,7 @@ ## Overview -The Workflow Engine is a 
configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 100+ built-in module types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. +The Workflow Engine is a configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 90+ module types and 130+ pipeline step types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. ## Core Engine @@ -19,7 +19,7 @@ The engine is built on the [CrisisTextLine/modular](https://github.com/CrisisTex - `cmd/server` -- runs workflow configs as a server process - `cmd/wfctl` -- validates and inspects workflow configs offline -## Module Types (100+) +## Module Types (90+) All modules are instantiated from YAML config via the plugin factory registry. Organized by category. Each module type is provided by a plugin (see **Plugin** column); all are included when using `plugins/all`. @@ -37,7 +37,7 @@ All modules are instantiated from YAML config via the plugin factory registry. O | `static.fileserver` | Static file serving | http | | `openapi` | OpenAPI v3 spec-driven HTTP route generation with request/response validation and Swagger UI | openapi | -> `httpserver.modular`, `httpclient.modular`, and `chimux.router` were removed in favour of `http.server`, `http.router`, and `reverseproxy`. +> `httpserver.modular`, `httpclient.modular`, and `chimux.router` were removed in favor of `http.server`, `http.router`, and `reverseproxy`. ### Middleware | Type | Description | Plugin | @@ -60,7 +60,7 @@ All modules are instantiated from YAML config via the plugin factory registry. 
O | `auth.token-blacklist` | Token revocation blacklist backed by SQLite or in-memory store | auth | | `security.field-protection` | Field-level encryption/decryption for sensitive data fields | auth | -> `auth.modular` was removed in favour of `auth.jwt`. +> `auth.modular` was removed in favor of `auth.jwt`. ### API & CQRS | Type | Description | Plugin | @@ -91,7 +91,7 @@ All modules are instantiated from YAML config via the plugin factory registry. O | `notification.slack` | Slack notification sender | messaging | | `webhook.sender` | Outbound webhook delivery with retry and dead letter | messaging | -> `eventbus.modular` was removed in favour of `messaging.broker.eventbus`. +> `eventbus.modular` was removed in favor of `messaging.broker.eventbus`. ### Database & Persistence | Type | Description | Plugin | @@ -100,7 +100,7 @@ All modules are instantiated from YAML config via the plugin factory registry. O | `persistence.store` | Write-through persistence (SQLite/PostgreSQL) | storage | | `database.partitioned` | PostgreSQL partitioned database for multi-tenant data isolation (LIST/RANGE partitions) | storage | -> `database.modular` was removed in favour of `database.workflow`. +> `database.modular` was removed in favor of `database.workflow`. ### NoSQL / Datastores | Type | Description | Plugin | @@ -504,7 +504,7 @@ value: '{{ index .steps "parse-request" "path_params" "id" }}' | `jsonschema.modular` | JSON Schema validation | modularcompat | | `dynamic.component` | Yaegi hot-reload Go component | ai | -> `eventbus.modular` was removed in favour of `messaging.broker.eventbus`. +> `eventbus.modular` was removed in favor of `messaging.broker.eventbus`. > `data.transformer` and `workflow.registry` are provided by the `api` plugin (see API & CQRS section above). 
## Module Type Reference From 03e88770f344a2e0e9ee16c7bb983a14f7623c0a Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 11 Mar 2026 23:28:48 +0000 Subject: [PATCH 4/6] feat(mcp): add step/module coverage validation and expose DOCUMENTATION.md as MCP resource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add TestListStepTypes_AllBuiltinsPresent and TestListModuleTypes_AllBuiltinsPresent in mcp/step_coverage_test.go: loads all built-in plugins via plugins/all.DefaultPlugins(), registers their types into global schema registries, and verifies handleListStepTypes / handleListModuleTypes return every registered type (MCP equivalent of TestDocumentationCoverage) - Add workflow://docs/full-reference MCP resource that serves DOCUMENTATION.md content; resolves file via: explicit WithDocumentationFile path → pluginDir-relative path → CWD → graceful fallback message with public repo URL - Add WithDocumentationFile ServerOption to mcp.Server for explicit doc file path - Add -documentation-file flag to `wfctl mcp` command - Add TestDocsFullReference_* tests for fallback, WithFile, and repo file cases Co-authored-by: intel352 <77607+intel352@users.noreply.github.com> --- cmd/wfctl/mcp.go | 4 + mcp/server.go | 91 +++++++++++++- mcp/step_coverage_test.go | 247 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 338 insertions(+), 4 deletions(-) create mode 100644 mcp/step_coverage_test.go diff --git a/cmd/wfctl/mcp.go b/cmd/wfctl/mcp.go index a4135a1b..4ef78ac5 100644 --- a/cmd/wfctl/mcp.go +++ b/cmd/wfctl/mcp.go @@ -13,6 +13,7 @@ func runMCP(args []string) error { fs := flag.NewFlagSet("mcp", flag.ContinueOnError) pluginDir := fs.String("plugin-dir", "data/plugins", "Plugin data directory") registryDir := fs.String("registry-dir", "", "Path to cloned workflow-registry for plugin search") + documentationFile := fs.String("documentation-file", "", "Path to 
DOCUMENTATION.md (auto-detected when empty)") fs.Usage = func() { fmt.Fprintf(fs.Output(), `Usage: wfctl mcp [options] @@ -54,6 +55,9 @@ See docs/mcp.md for full setup instructions. if *registryDir != "" { opts = append(opts, workflowmcp.WithRegistryDir(*registryDir)) } + if *documentationFile != "" { + opts = append(opts, workflowmcp.WithDocumentationFile(*documentationFile)) + } srv := workflowmcp.NewServer(*pluginDir, opts...) return srv.ServeStdio() diff --git a/mcp/server.go b/mcp/server.go index 968949bf..61bdf5f4 100644 --- a/mcp/server.go +++ b/mcp/server.go @@ -57,13 +57,25 @@ func WithRegistryDir(dir string) ServerOption { } } +// WithDocumentationFile sets an explicit path to DOCUMENTATION.md so that the +// workflow://docs/full-reference MCP resource serves the actual repo documentation. +// When not set the server attempts to locate the file automatically (see +// handleDocsFullReference). If the file cannot be found the resource returns a +// brief message directing users to the public documentation URL. +func WithDocumentationFile(path string) ServerOption { + return func(s *Server) { + s.documentationFile = path + } +} + // Server wraps an MCP server instance and provides workflow-engine-specific // tools and resources. 
type Server struct { - mcpServer *server.MCPServer - pluginDir string - registryDir string - engine EngineProvider // optional; enables execution tools when set + mcpServer *server.MCPServer + pluginDir string + registryDir string + documentationFile string // optional explicit path to DOCUMENTATION.md + engine EngineProvider // optional; enables execution tools when set } // NewServer creates a new MCP server with all workflow engine tools and @@ -278,6 +290,16 @@ func (s *Server) registerResources() { ), s.handleDocsModuleReference, ) + + s.mcpServer.AddResource( + mcp.NewResource( + "workflow://docs/full-reference", + "Full Workflow Engine Documentation", + mcp.WithResourceDescription("Complete DOCUMENTATION.md from the GoCodeAlone/workflow repository: all module types, step types, pipeline steps, template functions, configuration format, workflow types, trigger types, CI/CD steps, platform steps, and detailed per-module reference."), + mcp.WithMIMEType("text/markdown"), + ), + s.handleDocsFullReference, + ) } // --- Tool Handlers --- @@ -635,6 +657,67 @@ func (s *Server) handleDocsModuleReference(_ context.Context, _ mcp.ReadResource }, nil } +// handleDocsFullReference serves the complete DOCUMENTATION.md from the +// GoCodeAlone/workflow repository. It resolves the file in this order: +// 1. The explicit path set via WithDocumentationFile (if provided). +// 2. A path derived from the plugin directory (same parent-of-data layout used +// by handleGetConfigExamples): /../../DOCUMENTATION.md. +// 3. DOCUMENTATION.md in the current working directory. +// +// If none of the candidates can be read, a fallback message with the public +// documentation URL is returned so the resource is always usable. 
+func (s *Server) handleDocsFullReference(_ context.Context, _ mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) { + content := s.resolveDocumentationContent() + return []mcp.ResourceContents{ + mcp.TextResourceContents{ + URI: "workflow://docs/full-reference", + MIMEType: "text/markdown", + Text: content, + }, + }, nil +} + +// resolveDocumentationContent attempts to read DOCUMENTATION.md from several +// well-known locations and returns its content, or a fallback string on failure. +func (s *Server) resolveDocumentationContent() string { + candidates := s.documentationFileCandidates() + for _, p := range candidates { + if data, err := os.ReadFile(p); err == nil { //nolint:gosec // G304: path derived from trusted server config + return string(data) + } + } + return "# GoCodeAlone/workflow Documentation\n\n" + + "The full documentation (DOCUMENTATION.md) could not be found on the local filesystem.\n\n" + + "Please refer to the repository documentation at:\n" + + "https://github.com/GoCodeAlone/workflow/blob/main/DOCUMENTATION.md\n" +} + +// documentationFileCandidates returns ordered candidate paths for DOCUMENTATION.md. +func (s *Server) documentationFileCandidates() []string { + var candidates []string + + // 1. Explicit override via WithDocumentationFile. + if s.documentationFile != "" { + candidates = append(candidates, s.documentationFile) + } + + // 2. Derive from pluginDir: = .../data/plugins → root = pluginDir/../.. + if s.pluginDir != "" { + pluginBase := filepath.Base(s.pluginDir) + dataDir := filepath.Dir(s.pluginDir) + dataBase := filepath.Base(dataDir) + if pluginBase == "plugins" && dataBase == "data" { + root := filepath.Dir(dataDir) + candidates = append(candidates, filepath.Join(root, "DOCUMENTATION.md")) + } + } + + // 3. Current working directory. 
+ candidates = append(candidates, "DOCUMENTATION.md") + + return candidates +} + // --- Helpers --- func marshalToolResult(v any) (*mcp.CallToolResult, error) { diff --git a/mcp/step_coverage_test.go b/mcp/step_coverage_test.go new file mode 100644 index 00000000..d8696013 --- /dev/null +++ b/mcp/step_coverage_test.go @@ -0,0 +1,247 @@ +package mcp + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" + + "github.com/GoCodeAlone/workflow/capability" + "github.com/GoCodeAlone/workflow/plugin" + pluginall "github.com/GoCodeAlone/workflow/plugins/all" + "github.com/GoCodeAlone/workflow/schema" + "github.com/mark3labs/mcp-go/mcp" +) + +// registerBuiltinPluginTypesForTest loads all built-in plugins into the global +// schema registries (schema.KnownModuleTypes / schema.GetStepSchemaRegistry) +// so that MCP tools that rely on these registries reflect the full type set. +// This mirrors what happens at runtime when the workflow engine calls LoadPlugin +// for each built-in plugin. +func registerBuiltinPluginTypesForTest(t *testing.T) { + t.Helper() + capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + // Register module and step types into the global schema registry so + // that schema.KnownModuleTypes() and handleListStepTypes see them. + for typeName := range loader.ModuleFactories() { + schema.RegisterModuleType(typeName) + } + for typeName := range loader.StepFactories() { + schema.RegisterModuleType(typeName) + } + // Register rich step schemas (descriptions, config fields, outputs). 
+ for _, ss := range loader.StepSchemaRegistry().All() { + schema.GetStepSchemaRegistry().Register(ss) + } + } +} + +// TestListStepTypes_AllBuiltinsPresent validates that every step type registered +// by the built-in plugins (plugins/all) appears in the MCP list_step_types tool +// response. This is the MCP equivalent of TestDocumentationCoverage and ensures +// that wfctl's MCP server accurately reflects all available step types. +func TestListStepTypes_AllBuiltinsPresent(t *testing.T) { + registerBuiltinPluginTypesForTest(t) + + srv := NewServer("") + result, err := srv.handleListStepTypes(context.Background(), mcp.CallToolRequest{}) + if err != nil { + t.Fatalf("handleListStepTypes error: %v", err) + } + + text := extractText(t, result) + var data map[string]any + if err := json.Unmarshal([]byte(text), &data); err != nil { + t.Fatalf("failed to parse result JSON: %v", err) + } + + steps, ok := data["step_types"].([]any) + if !ok { + t.Fatal("step_types not found in result") + } + listed := make(map[string]bool, len(steps)) + for _, s := range steps { + if entry, ok := s.(map[string]any); ok { + if typeName, ok := entry["type"].(string); ok { + listed[typeName] = true + } + } + } + + // Collect all step types from the built-in plugins. 
+ capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + } + + var missing []string + for typeName := range loader.StepFactories() { + if !listed[typeName] { + missing = append(missing, typeName) + } + } + + if len(missing) > 0 { + sort.Strings(missing) + t.Errorf("step types registered by built-in plugins but missing from list_step_types (%d missing):\n %s\n\n"+ + "Add these step types to schema/schema.go coreModuleTypes slice "+ + "or register them via schema.RegisterModuleType so they appear in KnownModuleTypes.", + len(missing), strings.Join(missing, "\n ")) + } +} + +// TestListModuleTypes_AllBuiltinsPresent validates that every module type registered +// by the built-in plugins (plugins/all) appears in the MCP list_module_types tool +// response. +func TestListModuleTypes_AllBuiltinsPresent(t *testing.T) { + registerBuiltinPluginTypesForTest(t) + + srv := NewServer("") + result, err := srv.handleListModuleTypes(context.Background(), mcp.CallToolRequest{}) + if err != nil { + t.Fatalf("handleListModuleTypes error: %v", err) + } + + text := extractText(t, result) + var data map[string]any + if err := json.Unmarshal([]byte(text), &data); err != nil { + t.Fatalf("failed to parse result JSON: %v", err) + } + + rawTypes, ok := data["module_types"].([]any) + if !ok { + t.Fatal("module_types not found in result") + } + listed := make(map[string]bool, len(rawTypes)) + for _, mt := range rawTypes { + if s, ok := mt.(string); ok { + listed[s] = true + } + } + + // Collect all module types from the built-in plugins. 
+ capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + } + + var missing []string + for typeName := range loader.ModuleFactories() { + if !listed[typeName] { + missing = append(missing, typeName) + } + } + + if len(missing) > 0 { + sort.Strings(missing) + t.Errorf("module types registered by built-in plugins but missing from list_module_types (%d missing):\n %s\n\n"+ + "Add these module types to schema/schema.go coreModuleTypes slice "+ + "or register them via schema.RegisterModuleType so they appear in KnownModuleTypes.", + len(missing), strings.Join(missing, "\n ")) + } +} + +// TestDocsFullReference_Fallback verifies that the full-reference resource +// returns a usable fallback when DOCUMENTATION.md cannot be found. +func TestDocsFullReference_Fallback(t *testing.T) { + // Use a server with a non-existent plugin dir so no file is found. + srv := NewServer("/nonexistent/data/plugins") + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(contents) != 1 { + t.Fatalf("expected 1 resource content, got %d", len(contents)) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + if text.URI != "workflow://docs/full-reference" { + t.Errorf("unexpected URI: %q", text.URI) + } + if text.MIMEType != "text/markdown" { + t.Errorf("unexpected MIME type: %q", text.MIMEType) + } + if !strings.Contains(text.Text, "GoCodeAlone/workflow") { + t.Error("fallback text should mention 'GoCodeAlone/workflow'") + } +} + +// TestDocsFullReference_WithFile verifies that the full-reference resource +// serves the provided file content when WithDocumentationFile is used. 
+func TestDocsFullReference_WithFile(t *testing.T) { + // Write a temporary DOCUMENTATION.md-like file. + dir := t.TempDir() + docPath := filepath.Join(dir, "DOCUMENTATION.md") + content := "# Workflow Engine Documentation\n\nTest content.\n" + if err := os.WriteFile(docPath, []byte(content), 0600); err != nil { + t.Fatalf("failed to write temp file: %v", err) + } + + srv := NewServer("", WithDocumentationFile(docPath)) + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(contents) != 1 { + t.Fatalf("expected 1 resource content, got %d", len(contents)) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + if text.Text != content { + t.Errorf("expected file content %q, got %q", content, text.Text) + } +} + +// TestDocsFullReference_RepoFile verifies that the full-reference resource +// serves the actual DOCUMENTATION.md when it exists next to the test. +func TestDocsFullReference_RepoFile(t *testing.T) { + // Locate the repo root via the test file's path. + _, testFilePath, _, ok := runtime.Caller(0) + if !ok { + t.Skip("runtime.Caller failed") + } + repoRoot := filepath.Join(filepath.Dir(testFilePath), "..") + docPath := filepath.Join(repoRoot, "DOCUMENTATION.md") + if _, err := os.Stat(docPath); err != nil { + t.Skipf("DOCUMENTATION.md not found at %q: %v", docPath, err) + } + + srv := NewServer("", WithDocumentationFile(docPath)) + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + + // Spot-check a few key strings that should be in DOCUMENTATION.md. 
+ for _, want := range []string{"openapi", "auth.m2m", "database.partitioned", "config.provider"} { + if !strings.Contains(text.Text, want) { + t.Errorf("DOCUMENTATION.md should contain %q", want) + } + } +} From 4673afa859c1fa5f3609ff1b3f4117d3fe3e0924 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 12 Mar 2026 00:12:16 +0000 Subject: [PATCH 5/6] fix: apply PR review feedback on openapi docs accuracy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add plugins/openapi to plugins/all.DefaultPlugins() so openapi module type is part of the standard plugin set (resolves review comment on line 24 claiming all types are in plugins/all) - Fix openapi swagger_ui config docs: it's an object {enabled, path}, not a bool; default path is /docs not /swagger/ (review comment line 524-532) - Remove validation.response and validation.response_action from openapi docs and example — Validation.Response field exists in the struct but is never read in the route handling code, and ResponseAction does not exist at all (review comment line 526-531) - Fix openapi module table description: "request/response validation" → "request validation" Co-authored-by: intel352 <77607+intel352@users.noreply.github.com> --- DOCUMENTATION.md | 15 +++++++-------- plugins/all/all.go | 2 ++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index c4f315e8..6fcabe51 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -35,7 +35,7 @@ All modules are instantiated from YAML config via the plugin factory registry. 
O | `http.simple_proxy` | Simplified proxy configuration | http | | `reverseproxy` | Modular framework reverse proxy (v2) | http | | `static.fileserver` | Static file serving | http | -| `openapi` | OpenAPI v3 spec-driven HTTP route generation with request/response validation and Swagger UI | openapi | +| `openapi` | OpenAPI v3 spec-driven HTTP route generation with request validation and Swagger UI | openapi | > `httpserver.modular`, `httpclient.modular`, and `chimux.router` were removed in favor of `http.server`, `http.router`, and `reverseproxy`. @@ -515,7 +515,7 @@ Detailed configuration reference for module types not covered in the main table ### `openapi` -Parses an OpenAPI v3 specification file and automatically generates HTTP routes, validates requests and responses against the spec, and optionally serves Swagger UI. Routes are mapped to named pipelines via the `x-pipeline` extension field in the spec. +Parses an OpenAPI v3 specification file and automatically generates HTTP routes, validates incoming requests against the spec, and optionally serves Swagger UI. Routes are mapped to named pipelines via the `x-pipeline` extension field in the spec. **Configuration:** @@ -526,9 +526,8 @@ Parses an OpenAPI v3 specification file and automatically generates HTTP routes, | `base_path` | string | `""` | URL path prefix to strip before matching spec paths. | | `max_body_bytes` | int | `1048576` | Maximum request body size (bytes). | | `validation.request` | bool | `true` | Validate incoming request bodies, query params, and headers against the spec. | -| `validation.response` | bool | `false` | Validate outgoing response bodies against the spec. | -| `validation.response_action` | string | `"warn"` | Action on response validation failure: `"warn"` (log only) or `"error"` (return 500). | -| `swagger_ui` | bool | `false` | Serve Swagger UI at `/swagger/` (requires `spec_file`). | +| `swagger_ui.enabled` | bool | `false` | Serve Swagger UI (requires `spec_file`). 
| +| `swagger_ui.path` | string | `"/docs"` | URL path at which the Swagger UI is served. | **Route mapping via `x-pipeline`:** @@ -551,9 +550,9 @@ modules: router: main-router validation: request: true - response: true - response_action: warn - swagger_ui: true + swagger_ui: + enabled: true + path: /docs ``` --- diff --git a/plugins/all/all.go b/plugins/all/all.go index d5453a32..9056a6d9 100644 --- a/plugins/all/all.go +++ b/plugins/all/all.go @@ -42,6 +42,7 @@ import ( pluginmessaging "github.com/GoCodeAlone/workflow/plugins/messaging" pluginmodcompat "github.com/GoCodeAlone/workflow/plugins/modularcompat" pluginobs "github.com/GoCodeAlone/workflow/plugins/observability" + pluginopenapi "github.com/GoCodeAlone/workflow/plugins/openapi" pluginpipeline "github.com/GoCodeAlone/workflow/plugins/pipelinesteps" pluginplatform "github.com/GoCodeAlone/workflow/plugins/platform" pluginpolicy "github.com/GoCodeAlone/workflow/plugins/policy" @@ -67,6 +68,7 @@ func DefaultPlugins() []plugin.EnginePlugin { pluginlicense.New(), pluginconfigprovider.New(), pluginhttp.New(), + pluginopenapi.New(), pluginobs.New(), pluginmessaging.New(), pluginsm.New(), From 8e0b40e02d2c5bdecdc0e49c5f761e3d0124d40c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 12 Mar 2026 10:18:59 +0000 Subject: [PATCH 6/6] =?UTF-8?q?merge:=20resolve=20DOCUMENTATION.md=20confl?= =?UTF-8?q?ict=20with=20main=20=E2=80=94=20add=20step.json=5Fparse,=20rest?= =?UTF-8?q?ore=20response=20validation,=20trustedKeys,=20autoSync=20docs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: intel352 <77607+intel352@users.noreply.github.com> --- DOCUMENTATION.md | 7 + e2e_middleware_test.go | 138 ++++ example/go.mod | 14 +- example/go.sum | 28 +- example/openapi-jsonapi-articles.yaml | 106 +++ example/openapi-petstore.yaml | 3 +- example/specs/jsonapi-articles.yaml | 189 +++++ example/specs/petstore.yaml | 74 
+- modernize/rules.go | 79 +- modernize/rules_test.go | 112 +++ module/auth_m2m.go | 143 +++- module/auth_m2m_test.go | 201 ++++- module/database.go | 5 +- module/database_partitioned.go | 102 ++- module/database_partitioned_test.go | 233 ++++++ module/database_scan_helpers.go | 41 + module/http_middleware.go | 102 ++- module/http_middleware_test.go | 207 ++++- module/openapi.go | 359 ++++++++- module/openapi_test.go | 994 ++++++++++++++++++++++++ module/pipeline_step_db_query.go | 6 +- module/pipeline_step_db_query_cached.go | 4 +- module/pipeline_step_db_query_test.go | 82 ++ module/pipeline_step_json_parse.go | 82 ++ module/pipeline_step_json_parse_test.go | 216 +++++ module/pipeline_step_resilience.go | 7 + module/platform_do_database.go | 6 +- module/platform_do_database_test.go | 12 +- module/scan_provider_test.go | 10 +- plugins/auth/plugin.go | 44 ++ plugins/auth/plugin_test.go | 126 +++ plugins/http/modules.go | 32 +- plugins/http/schemas.go | 12 +- plugins/pipelinesteps/plugin.go | 8 +- plugins/pipelinesteps/plugin_test.go | 1 + plugins/storage/plugin.go | 10 +- 36 files changed, 3655 insertions(+), 140 deletions(-) create mode 100644 example/openapi-jsonapi-articles.yaml create mode 100644 example/specs/jsonapi-articles.yaml create mode 100644 module/database_scan_helpers.go create mode 100644 module/pipeline_step_json_parse.go create mode 100644 module/pipeline_step_json_parse_test.go diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index 6fcabe51..6cbc8fb9 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -153,6 +153,7 @@ flowchart TD | `step.db_sync_partitions` | Ensures future partitions exist for a partitioned table | pipelinesteps | | `step.json_response` | Writes HTTP JSON response with custom status code and headers | pipelinesteps | | `step.raw_response` | Writes a raw HTTP response with arbitrary content type | pipelinesteps | +| `step.json_parse` | Parses a JSON string (or `[]byte`) in the pipeline context into a structured object | 
pipelinesteps | | `step.static_file` | Serves a pre-loaded file from disk as an HTTP response | pipelinesteps | | `step.workflow_call` | Invokes another workflow pipeline by name | pipelinesteps | | `step.sub_workflow` | Executes a named sub-workflow inline and merges its output | ai | @@ -526,6 +527,8 @@ Parses an OpenAPI v3 specification file and automatically generates HTTP routes, | `base_path` | string | `""` | URL path prefix to strip before matching spec paths. | | `max_body_bytes` | int | `1048576` | Maximum request body size (bytes). | | `validation.request` | bool | `true` | Validate incoming request bodies, query params, and headers against the spec. | +| `validation.response` | bool | `false` | Validate pipeline response bodies against the spec's response schemas. | +| `validation.response_action` | string | `"warn"` | Action when response validation fails: `"warn"` (log only) or `"error"` (return HTTP 500). | | `swagger_ui.enabled` | bool | `false` | Serve Swagger UI (requires `spec_file`). | | `swagger_ui.path` | string | `"/docs"` | URL path at which the Swagger UI is served. | @@ -550,6 +553,8 @@ modules: router: main-router validation: request: true + response: true + response_action: warn swagger_ui: enabled: true path: /docs @@ -572,6 +577,7 @@ Machine-to-machine (M2M) OAuth2 authentication module. Implements the `client_cr | `issuer` | string | `"workflow"` | Token `iss` claim. | | `clients` | array | `[]` | Registered OAuth2 clients: `[{clientId, clientSecret, scopes, description, claims}]`. | | `introspect` | object | — | Access-control policy for `POST /oauth/introspect`: `{allowOthers, requiredScope, requiredClaim, requiredClaimVal}`. Default: self-only. | +| `trustedKeys` | array | `[]` | Trusted external JWT issuers for JWT-bearer assertion grants: `[{issuer, publicKeyPEM, algorithm, audiences, claimMapping}]`. Supports literal `\n` in PEM values. 
| **HTTP endpoints provided:** @@ -666,6 +672,7 @@ PostgreSQL partitioned database module for multi-tenant data isolation. Manages | `partitionNameFormat` | string | `"{table}_{tenant}"` | Template for partition table names. Supports `{table}` and `{tenant}` placeholders. | | `sourceTable` | string | — | Table containing all tenant IDs for auto-partition sync (e.g., `"tenants"`). | | `sourceColumn` | string | — | Column in source table to query for tenant values. Defaults to `partitionKey`. | +| `autoSync` | bool | `true` | Automatically sync partitions from `sourceTable` on engine start. Defaults to `true` when `sourceTable` is set, `false` otherwise. | | `maxOpenConns` | int | `25` | Maximum open database connections. | | `maxIdleConns` | int | `5` | Maximum idle connections in the pool. | diff --git a/e2e_middleware_test.go b/e2e_middleware_test.go index c7d5c713..e0ff7693 100644 --- a/e2e_middleware_test.go +++ b/e2e_middleware_test.go @@ -416,6 +416,144 @@ func TestE2E_Middleware_CORS(t *testing.T) { t.Log("E2E Middleware CORS: Allowed, disallowed, headers, and preflight scenarios verified") } +// TestE2E_Middleware_CORS_FullConfig verifies that the CORS middleware factory correctly +// applies allowedHeaders, allowCredentials, maxAge, and wildcard subdomain origin matching. 
+func TestE2E_Middleware_CORS_FullConfig(t *testing.T) { + port := getFreePort(t) + addr := fmt.Sprintf(":%d", port) + baseURL := fmt.Sprintf("http://127.0.0.1:%d", port) + + cfg := &config.WorkflowConfig{ + Modules: []config.ModuleConfig{ + {Name: "fc-server", Type: "http.server", Config: map[string]any{"address": addr}}, + {Name: "fc-router", Type: "http.router", DependsOn: []string{"fc-server"}}, + {Name: "fc-handler", Type: "http.handler", DependsOn: []string{"fc-router"}, Config: map[string]any{"contentType": "application/json"}}, + {Name: "fc-cors", Type: "http.middleware.cors", DependsOn: []string{"fc-router"}, Config: map[string]any{ + "allowedOrigins": []any{"*.example.com", "https://trusted.io"}, + "allowedMethods": []any{"GET", "POST", "OPTIONS"}, + "allowedHeaders": []any{"Authorization", "Content-Type", "X-CSRF-Token", "X-Request-Id"}, + "allowCredentials": true, + "maxAge": 3600, + }}, + }, + Workflows: map[string]any{ + "http": map[string]any{ + "server": "fc-server", + "router": "fc-router", + "routes": []any{ + map[string]any{ + "method": "GET", + "path": "/api/fc-test", + "handler": "fc-handler", + "middlewares": []any{"fc-cors"}, + }, + }, + }, + }, + Triggers: map[string]any{}, + } + + logger := &mockLogger{} + app := modular.NewStdApplication(modular.NewStdConfigProvider(nil), logger) + engine := NewStdEngine(app, logger) + loadAllPlugins(t, engine) + engine.RegisterWorkflowHandler(handlers.NewHTTPWorkflowHandler()) + + if err := engine.BuildFromConfig(cfg); err != nil { + t.Fatalf("BuildFromConfig failed: %v", err) + } + + ctx := t.Context() + if err := engine.Start(ctx); err != nil { + t.Fatalf("Engine start failed: %v", err) + } + defer engine.Stop(context.Background()) + + waitForServer(t, baseURL, 5*time.Second) + client := &http.Client{Timeout: 5 * time.Second} + + // Subtest 1: Configurable allowedHeaders are reflected + t.Run("configurable_headers", func(t *testing.T) { + req, _ := http.NewRequest("GET", baseURL+"/api/fc-test", nil) + 
req.Header.Set("Origin", "http://app.example.com") + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer resp.Body.Close() + + acah := resp.Header.Get("Access-Control-Allow-Headers") + want := "Authorization, Content-Type, X-CSRF-Token, X-Request-Id" + if acah != want { + t.Errorf("Expected Access-Control-Allow-Headers %q, got %q", want, acah) + } + }) + + // Subtest 2: allowCredentials sets the Credentials header + t.Run("allow_credentials", func(t *testing.T) { + req, _ := http.NewRequest("GET", baseURL+"/api/fc-test", nil) + req.Header.Set("Origin", "https://trusted.io") + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer resp.Body.Close() + + if resp.Header.Get("Access-Control-Allow-Credentials") != "true" { + t.Errorf("Expected Access-Control-Allow-Credentials: true, got %q", resp.Header.Get("Access-Control-Allow-Credentials")) + } + }) + + // Subtest 3: maxAge is set on responses + t.Run("max_age", func(t *testing.T) { + req, _ := http.NewRequest("GET", baseURL+"/api/fc-test", nil) + req.Header.Set("Origin", "https://trusted.io") + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer resp.Body.Close() + + if resp.Header.Get("Access-Control-Max-Age") != "3600" { + t.Errorf("Expected Access-Control-Max-Age: 3600, got %q", resp.Header.Get("Access-Control-Max-Age")) + } + }) + + // Subtest 4: Wildcard subdomain matching + t.Run("wildcard_subdomain", func(t *testing.T) { + req, _ := http.NewRequest("GET", baseURL+"/api/fc-test", nil) + req.Header.Set("Origin", "http://admin.example.com") + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer resp.Body.Close() + + acao := resp.Header.Get("Access-Control-Allow-Origin") + if acao != "http://admin.example.com" { + t.Errorf("Expected Access-Control-Allow-Origin 'http://admin.example.com', got %q", acao) + } + }) + + // Subtest 5: Wildcard 
subdomain does not match unrelated domains + t.Run("wildcard_subdomain_no_match", func(t *testing.T) { + req, _ := http.NewRequest("GET", baseURL+"/api/fc-test", nil) + req.Header.Set("Origin", "http://evil.com") + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer resp.Body.Close() + + acao := resp.Header.Get("Access-Control-Allow-Origin") + if acao != "" { + t.Errorf("Expected no Access-Control-Allow-Origin for disallowed domain, got %q", acao) + } + }) + + t.Log("E2E Middleware CORS FullConfig: all new features verified") +} + // TestE2E_Middleware_RequestID verifies the RequestID middleware adds an // X-Request-ID header to every response, and preserves a client-supplied one. func TestE2E_Middleware_RequestID(t *testing.T) { diff --git a/example/go.mod b/example/go.mod index b00541e5..ae6989fb 100644 --- a/example/go.mod +++ b/example/go.mod @@ -5,7 +5,7 @@ go 1.26.0 replace github.com/GoCodeAlone/workflow => ../ require ( - github.com/GoCodeAlone/modular v1.12.0 + github.com/GoCodeAlone/modular v1.12.3 github.com/GoCodeAlone/workflow v0.0.0-00010101000000-000000000000 ) @@ -20,12 +20,12 @@ require ( cloud.google.com/go/storage v1.60.0 // indirect github.com/BurntSushi/toml v1.6.0 // indirect github.com/DataDog/datadog-go/v5 v5.4.0 // indirect - github.com/GoCodeAlone/modular/modules/auth v1.12.0 // indirect - github.com/GoCodeAlone/modular/modules/cache v1.12.0 // indirect - github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.5.0 // indirect - github.com/GoCodeAlone/modular/modules/jsonschema v1.12.0 // indirect - github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.5.0 // indirect - github.com/GoCodeAlone/modular/modules/scheduler v1.12.0 // indirect + github.com/GoCodeAlone/modular/modules/auth v1.14.0 // indirect + github.com/GoCodeAlone/modular/modules/cache v1.14.0 // indirect + github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.7.0 // indirect + github.com/GoCodeAlone/modular/modules/jsonschema 
v1.14.0 // indirect + github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.7.0 // indirect + github.com/GoCodeAlone/modular/modules/scheduler v1.14.0 // indirect github.com/GoCodeAlone/yaegi v0.17.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect diff --git a/example/go.sum b/example/go.sum index f1c81b9c..b872c630 100644 --- a/example/go.sum +++ b/example/go.sum @@ -30,20 +30,20 @@ github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.4.0 h1:Ea3eXUVwrVV28F/fo3Dr3aa+TL/Z7Xi6SUPKW8L99aI= github.com/DataDog/datadog-go/v5 v5.4.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/GoCodeAlone/modular v1.12.0 h1:C4tLfJe65rrUQsbtndiVfldtT8IRKZcHczNRNbBK4wo= -github.com/GoCodeAlone/modular v1.12.0/go.mod h1:ET7mlekRjkRq9mwJdWmaC2KDUWvjla2IqKVFrYO2JnY= -github.com/GoCodeAlone/modular/modules/auth v1.12.0 h1:eO4iq8tkz8W5sLKRSG5dC+ACITMtxZrtSJ+ReE3fKdA= -github.com/GoCodeAlone/modular/modules/auth v1.12.0/go.mod h1:D+yfkgN3MTkyl1xe8h2UL7uqB9Vj1lO3wUrscfnJ/NU= -github.com/GoCodeAlone/modular/modules/cache v1.12.0 h1:Ue6aXytFq1I+OnC3PcV2KlUg4lHiuGWH0Qq+v/lqyp0= -github.com/GoCodeAlone/modular/modules/cache v1.12.0/go.mod h1:kSaT8wNy/3YGmtIpDqPbW6MRqKOp2yc8a5MHdAag2CE= -github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.5.0 h1:K6X+X1sOq+lpI1Oa+XUzH+GlSRYJQfDTTcvMjZfkbFU= -github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.5.0/go.mod h1:Q0TpCFTtd0q20okDyi63ALS+1xmkYU4wNUOqwczyih0= -github.com/GoCodeAlone/modular/modules/jsonschema v1.12.0 h1:urGK8Xtwku4tn8nBeVZn9UqvldnCptZ3rLCXO21vSz4= -github.com/GoCodeAlone/modular/modules/jsonschema v1.12.0/go.mod h1:+/0p1alfSbhhshcNRId1HRRIupeu0DPC7BH8AYiBQ1I= 
-github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.5.0 h1:zcF46oZ7MJFfZCmzqc1n9ZTw6wrTJSFr04yaz6EYKeo= -github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.5.0/go.mod h1:ycmJYst0dgaeLYBDOFGYz3ZiVK0fVcbl59omBySpKis= -github.com/GoCodeAlone/modular/modules/scheduler v1.12.0 h1:kxeLUpFFZ2HWV5B7Ra1WaOr1DDee5G6kAZ6F1BUXX/Y= -github.com/GoCodeAlone/modular/modules/scheduler v1.12.0/go.mod h1:VpDSAU0Guj8geVz19YCSknyCJp0j3TMBaxLEYXedkZc= +github.com/GoCodeAlone/modular v1.12.3 h1:WcNqc1ZG+Lv/xzF8wTDavGIOeAvlV4wEd5HO2mVTUwE= +github.com/GoCodeAlone/modular v1.12.3/go.mod h1:nDdyW/eJu4gDFNueb6vWwLvti3bPHSZJHkWGiwEmi2I= +github.com/GoCodeAlone/modular/modules/auth v1.14.0 h1:Y+p4/HIcxkajlcNhcPlqpwAt1SCHjB4AaDMEys50E3I= +github.com/GoCodeAlone/modular/modules/auth v1.14.0/go.mod h1:fkwPn2svDsCHBI19gtUHxo064SL+EudjB+o7VjL9ug8= +github.com/GoCodeAlone/modular/modules/cache v1.14.0 h1:ykQRwXJGXaRtAsnW9Tgs0LvXExonkKr8P7XIHxPaYdY= +github.com/GoCodeAlone/modular/modules/cache v1.14.0/go.mod h1:tcIjHJHZ5fVU8sstILrXeVQgjpZcUkErnNjRaxkBSR8= +github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.7.0 h1:clGAyaOfyDc9iY63ONfZiHReVccVhK/yH19QEb14SSI= +github.com/GoCodeAlone/modular/modules/eventbus/v2 v2.7.0/go.mod h1:0AnfWGVmrqyv91rduc6mrPqW6WQchDAa2WtM0Qmw/WA= +github.com/GoCodeAlone/modular/modules/jsonschema v1.14.0 h1:dCiPIO+NvJPizfCeUQqGXHD1WitOVYpKuL3fxMEjRlw= +github.com/GoCodeAlone/modular/modules/jsonschema v1.14.0/go.mod h1:5Hm+R9G41wwb0hKefx9+9PMqffjU1tA7roW3t3sTaLE= +github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.7.0 h1:TtVD+tE8ABN98n50MFVyMAvMsBM4JE86KRgCRDzPDC4= +github.com/GoCodeAlone/modular/modules/reverseproxy/v2 v2.7.0/go.mod h1:N7d8aSV4eqr90qjlIOs/8EmW7avt9gwX06Uh+zKDr4s= +github.com/GoCodeAlone/modular/modules/scheduler v1.14.0 h1:JSrzo4FB7uGASExv+fCLRd6pXWULV1mJYvzmM9PzUeM= +github.com/GoCodeAlone/modular/modules/scheduler v1.14.0/go.mod h1:emkR2AnilabLJZv1rOTDO9eGpRBmZs487H00Lnp9jIc= github.com/GoCodeAlone/yaegi v0.17.1 
h1:aPAwU29L9cGceRAff02c5pjQcT5KapDB4fWFZK9tElE= github.com/GoCodeAlone/yaegi v0.17.1/go.mod h1:z5Pr6Wse6QJcQvpgxTxzMAevFarH0N37TG88Y9dprx0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= diff --git a/example/openapi-jsonapi-articles.yaml b/example/openapi-jsonapi-articles.yaml new file mode 100644 index 00000000..7d339455 --- /dev/null +++ b/example/openapi-jsonapi-articles.yaml @@ -0,0 +1,106 @@ +# OpenAPI Response Validation — JSON:API Example +# +# This configuration demonstrates how the workflow engine validates API +# responses against an OpenAPI specification, including complex response +# formats like JSON:API (https://jsonapi.org/). +# +# Key features: +# - Request validation: incoming requests are checked against the spec +# - Response validation: outgoing responses are checked against the spec +# - response_action: "warn" logs violations, "error" rejects them with 500 +# +# The JSON:API spec (specs/jsonapi-articles.yaml) defines a complex nested +# response envelope with required fields (data, type, id, attributes) that +# the engine validates automatically. + +requires: + plugins: + - name: workflow-plugin-http + - name: workflow-plugin-openapi + +modules: + # HTTP server + - name: jsonapi-server + type: http.server + config: + address: ":8096" + + # HTTP router + - name: jsonapi-router + type: http.router + dependsOn: + - jsonapi-server + + # OpenAPI module with response validation enabled + - name: articles-api + type: openapi + dependsOn: + - jsonapi-router + config: + spec_file: specs/jsonapi-articles.yaml + base_path: /api/v1 + router: jsonapi-router + validation: + request: true + response: true + response_action: warn # "warn" = log and pass through; "error" = reject with 500 + swagger_ui: + enabled: true + path: /docs + +# Pipelines that generate JSON:API compliant responses. +# The OpenAPI response validation ensures these conform to the spec. 
+pipelines: + list-articles: + steps: + - name: build-response + type: step.set + config: + values: + response_status: 200 + response_headers: + Content-Type: "application/vnd.api+json" + response_body: | + { + "data": [ + { + "type": "articles", + "id": "1", + "attributes": { + "title": "Getting Started with Workflow Engine", + "body": "This article explains how to use the workflow engine...", + "created_at": "2024-01-15T10:30:00Z" + }, + "relationships": { + "author": { + "data": {"type": "people", "id": "42"} + } + } + } + ], + "meta": {"total": 1, "page": 1, "per_page": 10}, + "links": {"self": "/api/v1/articles"} + } + + get-article: + steps: + - name: build-response + type: step.set + config: + values: + response_status: 200 + response_headers: + Content-Type: "application/vnd.api+json" + response_body: | + { + "data": { + "type": "articles", + "id": "1", + "attributes": { + "title": "Getting Started with Workflow Engine", + "body": "This article explains how to use the workflow engine...", + "created_at": "2024-01-15T10:30:00Z" + } + }, + "links": {"self": "/api/v1/articles/1"} + } diff --git a/example/openapi-petstore.yaml b/example/openapi-petstore.yaml index c2b6f2d1..c660cc99 100644 --- a/example/openapi-petstore.yaml +++ b/example/openapi-petstore.yaml @@ -29,7 +29,8 @@ modules: router: petstore-router validation: request: true - response: false + response: true + response_action: warn swagger_ui: enabled: true path: /docs diff --git a/example/specs/jsonapi-articles.yaml b/example/specs/jsonapi-articles.yaml new file mode 100644 index 00000000..851dddc4 --- /dev/null +++ b/example/specs/jsonapi-articles.yaml @@ -0,0 +1,189 @@ +openapi: "3.0.0" +info: + title: Articles API (JSON:API) + version: "1.0.0" + description: | + A sample API that returns JSON:API (https://jsonapi.org/) compliant responses. + Demonstrates OpenAPI response validation against a complex JSON:API envelope. 
+ +paths: + /articles: + get: + operationId: listArticles + summary: List all articles + x-pipeline: list-articles + parameters: + - name: page[number] + in: query + required: false + schema: + type: integer + minimum: 1 + - name: page[size] + in: query + required: false + schema: + type: integer + minimum: 1 + maximum: 100 + responses: + "200": + description: A JSON:API compliant list of articles + content: + application/vnd.api+json: + schema: + type: object + required: + - data + properties: + data: + type: array + items: + type: object + required: + - type + - id + - attributes + properties: + type: + type: string + id: + type: string + attributes: + type: object + required: + - title + properties: + title: + type: string + body: + type: string + created_at: + type: string + relationships: + type: object + properties: + author: + type: object + properties: + data: + type: object + required: + - type + - id + properties: + type: + type: string + id: + type: string + included: + type: array + items: + type: object + required: + - type + - id + properties: + type: + type: string + id: + type: string + attributes: + type: object + meta: + type: object + properties: + total: + type: integer + page: + type: integer + per_page: + type: integer + links: + type: object + properties: + self: + type: string + first: + type: string + last: + type: string + next: + type: string + prev: + type: string + + /articles/{id}: + get: + operationId: getArticle + summary: Get a single article + x-pipeline: get-article + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: A single article resource + content: + application/vnd.api+json: + schema: + type: object + required: + - data + properties: + data: + type: object + required: + - type + - id + - attributes + properties: + type: + type: string + id: + type: string + attributes: + type: object + required: + - title + properties: + title: + type: string + body: + type: 
string + created_at: + type: string + relationships: + type: object + links: + type: object + properties: + self: + type: string + "404": + description: Article not found + content: + application/vnd.api+json: + schema: + type: object + required: + - errors + properties: + errors: + type: array + minItems: 1 + items: + type: object + required: + - status + - title + properties: + status: + type: string + title: + type: string + detail: + type: string diff --git a/example/specs/petstore.yaml b/example/specs/petstore.yaml index b16b9973..19ee52cf 100644 --- a/example/specs/petstore.yaml +++ b/example/specs/petstore.yaml @@ -20,6 +20,25 @@ paths: responses: "200": description: A list of pets + content: + application/json: + schema: + type: array + items: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + minLength: 1 + maxLength: 100 + tag: + type: string + maxLength: 50 "400": description: Invalid request post: @@ -43,7 +62,21 @@ paths: maxLength: 50 responses: "201": - description: Null response + description: Created pet + content: + application/json: + schema: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + tag: + type: string "400": description: Validation error @@ -60,8 +93,31 @@ paths: responses: "200": description: Expected response to a valid request + content: + application/json: + schema: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + tag: + type: string "404": description: Pet not found + content: + application/json: + schema: + type: object + required: + - error + properties: + error: + type: string /pets/{petId}/status: put: @@ -91,6 +147,22 @@ paths: responses: "200": description: Updated + content: + application/json: + schema: + type: object + required: + - id + - status + properties: + id: + type: integer + status: + type: string + enum: + - available + - pending + - sold "400": 
description: Invalid status "404": diff --git a/modernize/rules.go b/modernize/rules.go index 72702291..174bb135 100644 --- a/modernize/rules.go +++ b/modernize/rules.go @@ -201,39 +201,62 @@ func hyphenStepsRule() Rule { // hyphenStepsFixConfig updates step name references inside config mapping values. // It handles: template index expressions, conditional field dot-paths, route values, -// and default values that are exact step name matches. +// default values that are exact step name matches, and sequence elements (e.g., params arrays). func hyphenStepsFixConfig(cfg *yaml.Node, renames map[string]string, changes *[]Change) { - if cfg.Kind != yaml.MappingNode { - return - } - for i := 0; i+1 < len(cfg.Content); i += 2 { - key := cfg.Content[i] - val := cfg.Content[i+1] - - switch { - case val.Kind == yaml.MappingNode: - // Recurse into nested maps (e.g., routes map in step.conditional) - hyphenStepsFixConfig(val, renames, changes) - case val.Kind == yaml.ScalarNode: - for oldName, newName := range renames { - updated := hyphenStepsFixScalar(key.Value, val.Value, oldName, newName) - if updated != val.Value { - *changes = append(*changes, Change{ - RuleID: "hyphen-steps", - Line: val.Line, - Description: fmt.Sprintf("Updated reference %q in config", oldName), - }) - val.Value = updated + switch cfg.Kind { + case yaml.MappingNode: + for i := 0; i+1 < len(cfg.Content); i += 2 { + key := cfg.Content[i] + val := cfg.Content[i+1] + + switch val.Kind { + case yaml.MappingNode: + hyphenStepsFixConfig(val, renames, changes) + case yaml.SequenceNode: + hyphenStepsFixConfig(val, renames, changes) + case yaml.ScalarNode: + for oldName, newName := range renames { + updated := hyphenStepsFixScalar(key.Value, val.Value, oldName, newName) + if updated != val.Value { + *changes = append(*changes, Change{ + RuleID: "hyphen-steps", + Line: val.Line, + Description: fmt.Sprintf("Updated reference %q in config", oldName), + }) + val.Value = updated + } + } + } + } + case 
yaml.SequenceNode: + for _, elem := range cfg.Content { + switch elem.Kind { + case yaml.ScalarNode: + for oldName, newName := range renames { + updated := hyphenStepsFixScalar("", elem.Value, oldName, newName) + if updated != elem.Value { + *changes = append(*changes, Change{ + RuleID: "hyphen-steps", + Line: elem.Line, + Description: fmt.Sprintf("Updated reference %q in sequence", oldName), + }) + elem.Value = updated + } } + case yaml.MappingNode: + hyphenStepsFixConfig(elem, renames, changes) + case yaml.SequenceNode: + hyphenStepsFixConfig(elem, renames, changes) } } } } // hyphenStepsFixScalar updates a single scalar value, only in safe contexts: -// - "field" key: dot-path like steps.old-name.output +// - "field"/"body_from" key: dot-path like steps.old-name.output // - "default" key or route values: exact step name match // - Template index expressions: index .steps "old-name" "field" +// - Template step function: step "old-name" "field" // - Template dot-path expressions: .steps.old-name.field func hyphenStepsFixScalar(key, value, oldName, newName string) string { // Exact match (e.g., default: old-name, or route value: old-name) @@ -249,6 +272,12 @@ func hyphenStepsFixScalar(key, value, oldName, newName string) string { updated = strings.ReplaceAll(updated, indexPattern, `index .steps "`+newName+`"`) } + // Template step function: {{ step "old-name" "field" }} + stepFnPattern := `step "` + oldName + `"` + if strings.Contains(updated, stepFnPattern) { + updated = strings.ReplaceAll(updated, stepFnPattern, `step "`+newName+`"`) + } + // Template dot-path inside {{ }}: .steps.old-name.field dotPattern := ".steps." + oldName + "." 
if strings.Contains(updated, dotPattern) { @@ -260,8 +289,8 @@ func hyphenStepsFixScalar(key, value, oldName, newName string) string { updated = strings.ReplaceAll(updated, dotPatternEnd, ".steps."+newName+" ") } - // Conditional field dot-path (no template braces): steps.old-name.output - if key == "field" { + // Dot-path references in field/body_from: steps.old-name.output + if key == "field" || key == "body_from" { fieldPattern := "steps." + oldName + "." if strings.Contains(updated, fieldPattern) { updated = strings.ReplaceAll(updated, fieldPattern, "steps."+newName+".") diff --git a/modernize/rules_test.go b/modernize/rules_test.go index 87aea90d..4895b2fd 100644 --- a/modernize/rules_test.go +++ b/modernize/rules_test.go @@ -158,6 +158,118 @@ pipelines: } } +func TestHyphenSteps_RenamesSequenceElements(t *testing.T) { + input := ` +pipelines: + create-item: + steps: + - name: require-auth + type: step.auth_validate + config: + auth_module: auth-jwt + - name: insert-item + type: step.db_exec + config: + database: db + query: "INSERT INTO items (id, user_id) VALUES ($1, $2)" + params: + - "{{ uuidv4 }}" + - '{{ index .steps "require-auth" "sub" }}' +` + root := parseYAML(t, input) + rule := hyphenStepsRule() + changes := rule.Fix(root) + + out, err := yaml.Marshal(root) + if err != nil { + t.Fatal(err) + } + output := string(out) + + if !strings.Contains(output, `index .steps "require_auth" "sub"`) { + t.Errorf("template reference in params array not updated\noutput:\n%s", output) + } + if !strings.Contains(output, "name: require_auth") { + t.Error("step name not renamed") + } + + // Should have at least 3 changes: 2 name renames + 1 params reference + if len(changes) < 3 { + t.Errorf("expected at least 3 changes, got %d", len(changes)) + for _, c := range changes { + t.Logf(" %s (line %d): %s", c.RuleID, c.Line, c.Description) + } + } +} + +func TestHyphenSteps_RenamesStepFunction(t *testing.T) { + input := ` +pipelines: + update-profile: + steps: + - name: 
fetch-current + type: step.db_query + config: + database: db + query: "SELECT * FROM users WHERE id = $1" + mode: single + - name: merge-values + type: step.set + config: + values: + merged_name: '{{ trigger "body" "name" | default (step "fetch-current" "row" "name") }}' + merged_bio: '{{ trigger "body" "bio" | default (step "fetch-current" "row" "bio") | default "" }}' +` + root := parseYAML(t, input) + rule := hyphenStepsRule() + rule.Fix(root) + + out, err := yaml.Marshal(root) + if err != nil { + t.Fatal(err) + } + output := string(out) + + if !strings.Contains(output, `step "fetch_current" "row" "name"`) { + t.Errorf("step function reference not updated\noutput:\n%s", output) + } + if !strings.Contains(output, `step "fetch_current" "row" "bio"`) { + t.Errorf("step function reference in second value not updated\noutput:\n%s", output) + } +} + +func TestHyphenSteps_RenamesBodyFromPaths(t *testing.T) { + input := ` +pipelines: + analytics: + steps: + - name: fetch-stats + type: step.db_query + config: + database: db + query: "SELECT * FROM stats" + mode: single + - name: respond + type: step.json_response + config: + status: 200 + body_from: 'steps.fetch-stats.row' +` + root := parseYAML(t, input) + rule := hyphenStepsRule() + rule.Fix(root) + + out, err := yaml.Marshal(root) + if err != nil { + t.Fatal(err) + } + output := string(out) + + if !strings.Contains(output, "steps.fetch_stats.row") { + t.Errorf("body_from path not updated\noutput:\n%s", output) + } +} + func TestHyphenSteps_Check(t *testing.T) { input := ` pipelines: diff --git a/module/auth_m2m.go b/module/auth_m2m.go index 394c7d6d..b9aee52d 100644 --- a/module/auth_m2m.go +++ b/module/auth_m2m.go @@ -63,6 +63,32 @@ type M2MClient struct { Claims map[string]any `json:"claims,omitempty"` } +// TrustedKeyConfig holds the configuration for a trusted external JWT issuer. +// It is used to register trusted keys for the JWT-bearer grant via YAML configuration. 
+type TrustedKeyConfig struct { + // Issuer is the expected `iss` claim value (e.g. "https://legacy-platform.example.com"). + Issuer string `json:"issuer" yaml:"issuer"` + // Algorithm is the expected signing algorithm (e.g. "ES256"). Currently only ES256 is supported. + Algorithm string `json:"algorithm,omitempty" yaml:"algorithm,omitempty"` + // PublicKeyPEM is the PEM-encoded EC public key for the trusted issuer. + // Literal `\n` sequences (common in Docker/Kubernetes env vars) are normalised to newlines. + PublicKeyPEM string `json:"publicKeyPEM,omitempty" yaml:"publicKeyPEM,omitempty"` //nolint:gosec // G117: config DTO field + // Audiences is an optional list of accepted audience values. + // When non-empty, the assertion's `aud` claim must contain at least one of these values. + Audiences []string `json:"audiences,omitempty" yaml:"audiences,omitempty"` + // ClaimMapping renames claims from the external assertion before they are included in the + // issued token. The map key is the external claim name; the value is the local claim name. + // For example {"user_id": "sub"} promotes the external `user_id` claim to `sub`. + ClaimMapping map[string]string `json:"claimMapping,omitempty" yaml:"claimMapping,omitempty"` +} + +// trustedKeyEntry is the internal representation of a trusted external JWT issuer. +type trustedKeyEntry struct { + pubKey *ecdsa.PublicKey + audiences []string + claimMapping map[string]string +} + // M2MAuthModule provides machine-to-machine (server-to-server) OAuth2 authentication. // It supports the client_credentials grant and the JWT-bearer grant, and can issue // tokens signed with either HS256 (shared secret) or ES256 (ECDSA P-256). 
@@ -84,7 +110,7 @@ type M2MAuthModule struct { publicKey *ecdsa.PublicKey // Trusted public keys for JWT-bearer grant (keyed by key ID or issuer) - trustedKeys map[string]*ecdsa.PublicKey + trustedKeys map[string]*trustedKeyEntry // Registered clients mu sync.RWMutex @@ -116,7 +142,7 @@ func NewM2MAuthModule(name string, hmacSecret string, tokenExpiry time.Duration, issuer: issuer, tokenExpiry: tokenExpiry, hmacSecret: []byte(hmacSecret), - trustedKeys: make(map[string]*ecdsa.PublicKey), + trustedKeys: make(map[string]*trustedKeyEntry), clients: make(map[string]*M2MClient), jtiBlacklist: make(map[string]time.Time), } @@ -166,7 +192,46 @@ func (m *M2MAuthModule) SetInitErr(err error) { func (m *M2MAuthModule) AddTrustedKey(keyID string, pubKey *ecdsa.PublicKey) { m.mu.Lock() defer m.mu.Unlock() - m.trustedKeys[keyID] = pubKey + m.trustedKeys[keyID] = &trustedKeyEntry{pubKey: pubKey} +} + +// AddTrustedKeyFromPEM parses a PEM-encoded EC public key and registers it as a trusted +// key for JWT-bearer assertion validation. Literal `\n` sequences in the PEM string are +// normalised to real newlines so that env-var-injected keys (Docker/Kubernetes) work without +// additional preprocessing by the caller. +// +// audiences is an optional list; when non-empty the assertion's `aud` claim must match at +// least one entry. claimMapping renames external claims before they are forwarded into the +// issued token (map key = external name, map value = local name). +func (m *M2MAuthModule) AddTrustedKeyFromPEM(issuer, publicKeyPEM string, audiences []string, claimMapping map[string]string) error { + // Normalise escaped newlines that are common in Docker/Kubernetes env vars. 
+ normalised := strings.ReplaceAll(publicKeyPEM, `\n`, "\n") + + block, _ := pem.Decode([]byte(normalised)) + if block == nil { + return fmt.Errorf("auth.m2m: failed to decode PEM block for issuer %q", issuer) + } + + pubAny, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return fmt.Errorf("auth.m2m: parse public key for issuer %q: %w", issuer, err) + } + ecKey, ok := pubAny.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("auth.m2m: public key for issuer %q is not an ECDSA key", issuer) + } + if ecKey.Curve != elliptic.P256() { + return fmt.Errorf("auth.m2m: public key for issuer %q must use P-256 (ES256) curve", issuer) + } + + m.mu.Lock() + defer m.mu.Unlock() + m.trustedKeys[issuer] = &trustedKeyEntry{ + pubKey: ecKey, + audiences: audiences, + claimMapping: claimMapping, + } + return nil } // RegisterClient registers a new OAuth2 client. @@ -676,19 +741,19 @@ func (m *M2MAuthModule) validateJWTAssertion(assertion string) (jwt.MapClaims, e m.mu.RLock() // Try kid first, then iss. - var selectedKey *ecdsa.PublicKey + var selectedEntry *trustedKeyEntry if kid != "" { - selectedKey = m.trustedKeys[kid] + selectedEntry = m.trustedKeys[kid] } - if selectedKey == nil && iss != "" { - selectedKey = m.trustedKeys[iss] + if selectedEntry == nil && iss != "" { + selectedEntry = m.trustedKeys[iss] } hmacSecret := m.hmacSecret m.mu.RUnlock() // Try EC key if found. - if selectedKey != nil { - k := selectedKey + if selectedEntry != nil && selectedEntry.pubKey != nil { + k := selectedEntry.pubKey token, err := jwt.Parse(assertion, func(token *jwt.Token) (any, error) { if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) @@ -702,6 +767,19 @@ func (m *M2MAuthModule) validateJWTAssertion(assertion string) (jwt.MapClaims, e if !ok || !token.Valid { return nil, fmt.Errorf("invalid assertion claims") } + + // Validate audience if configured. 
+ if len(selectedEntry.audiences) > 0 { + if err := validateAssertionAudience(claims, selectedEntry.audiences); err != nil { + return nil, err + } + } + + // Apply claim mapping if configured. + if len(selectedEntry.claimMapping) > 0 { + claims = applyAssertionClaimMapping(claims, selectedEntry.claimMapping) + } + return claims, nil } @@ -1032,3 +1110,50 @@ func oauthError(code, description string) map[string]string { "error_description": description, } } + +// validateAssertionAudience checks that the JWT claims contain at least one of the +// required audience values. The `aud` claim can be a single string or a JSON array. +func validateAssertionAudience(claims jwt.MapClaims, requiredAudiences []string) error { + aud := claims["aud"] + if aud == nil { + return fmt.Errorf("assertion missing aud claim, expected one of %v", requiredAudiences) + } + var tokenAuds []string + switch v := aud.(type) { + case string: + tokenAuds = []string{v} + case []any: + for _, a := range v { + if s, ok := a.(string); ok { + tokenAuds = append(tokenAuds, s) + } + } + } + for _, required := range requiredAudiences { + for _, tokenAud := range tokenAuds { + if tokenAud == required { + return nil + } + } + } + return fmt.Errorf("assertion audience %v does not include required audience %v", tokenAuds, requiredAudiences) +} + +// applyAssertionClaimMapping renames claims from an external assertion before they are +// forwarded into the issued token. The mapping key is the external claim name; the +// value is the local claim name. The original claim is removed when the names differ. 
+func applyAssertionClaimMapping(claims jwt.MapClaims, mapping map[string]string) jwt.MapClaims { + result := make(jwt.MapClaims, len(claims)) + for k, v := range claims { + result[k] = v + } + for externalKey, localKey := range mapping { + if val, exists := claims[externalKey]; exists { + result[localKey] = val + if externalKey != localKey { + delete(result, externalKey) + } + } + } + return result +} diff --git a/module/auth_m2m_test.go b/module/auth_m2m_test.go index 0e9c38c8..9e8d11d3 100644 --- a/module/auth_m2m_test.go +++ b/module/auth_m2m_test.go @@ -1458,11 +1458,210 @@ func TestM2M_AddTrustedKey(t *testing.T) { stored := m.trustedKeys["svc"] m.mu.RUnlock() - if stored == nil { + if stored == nil || stored.pubKey == nil { t.Error("expected key to be stored") } } +// ecPublicKeyToPEM marshals an ECDSA public key to a PEM-encoded string. +func ecPublicKeyToPEM(t *testing.T, pub *ecdsa.PublicKey) string { + t.Helper() + pkixBytes, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + t.Fatalf("MarshalPKIXPublicKey: %v", err) + } + return string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pkixBytes})) +} + +func TestM2M_AddTrustedKeyFromPEM_Valid(t *testing.T) { + m := newM2MES256(t) + key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + pemStr := ecPublicKeyToPEM(t, &key.PublicKey) + + if err := m.AddTrustedKeyFromPEM("issuer-a", pemStr, nil, nil); err != nil { + t.Fatalf("AddTrustedKeyFromPEM: %v", err) + } + + m.mu.RLock() + stored := m.trustedKeys["issuer-a"] + m.mu.RUnlock() + + if stored == nil || stored.pubKey == nil { + t.Error("expected key to be stored") + } +} + +func TestM2M_AddTrustedKeyFromPEM_EscapedNewlines(t *testing.T) { + m := newM2MES256(t) + key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + pemStr := ecPublicKeyToPEM(t, &key.PublicKey) + // Simulate Docker/Kubernetes env var with literal \n instead of real newlines. 
+ escapedPEM := strings.ReplaceAll(pemStr, "\n", `\n`) + + if err := m.AddTrustedKeyFromPEM("issuer-b", escapedPEM, nil, nil); err != nil { + t.Fatalf("AddTrustedKeyFromPEM with escaped newlines: %v", err) + } + + m.mu.RLock() + stored := m.trustedKeys["issuer-b"] + m.mu.RUnlock() + + if stored == nil || stored.pubKey == nil { + t.Error("expected key to be stored after escaped-newline normalisation") + } +} + +func TestM2M_AddTrustedKeyFromPEM_Invalid(t *testing.T) { + m := newM2MES256(t) + err := m.AddTrustedKeyFromPEM("issuer-bad", "not-a-pem", nil, nil) + if err == nil { + t.Error("expected error for invalid PEM, got nil") + } +} + +func TestM2M_AddTrustedKeyFromPEM_NonP256Rejected(t *testing.T) { + m := newM2MES256(t) + // Generate a P-384 key, which should be rejected. + key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + t.Fatalf("generate P-384 key: %v", err) + } + pkixBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + t.Fatalf("MarshalPKIXPublicKey: %v", err) + } + pemStr := string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pkixBytes})) + + if err := m.AddTrustedKeyFromPEM("issuer-p384", pemStr, nil, nil); err == nil { + t.Error("expected error for P-384 key, got nil") + } +} + +func TestM2M_JWTBearer_AudienceValid(t *testing.T) { + server := newM2MES256(t) + clientKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + pemStr := ecPublicKeyToPEM(t, &clientKey.PublicKey) + + if err := server.AddTrustedKeyFromPEM("client-svc", pemStr, []string{"test-issuer"}, nil); err != nil { + t.Fatalf("AddTrustedKeyFromPEM: %v", err) + } + + claims := jwt.MapClaims{ + "iss": "client-svc", + "sub": "client-svc", + "aud": "test-issuer", + "iat": time.Now().Unix(), + "exp": time.Now().Add(5 * time.Minute).Unix(), + } + tok := jwt.NewWithClaims(jwt.SigningMethodES256, claims) + assertion, err := tok.SignedString(clientKey) + if err != nil { + t.Fatalf("sign assertion: %v", err) + } + + params := 
url.Values{ + "grant_type": {GrantTypeJWTBearer}, + "assertion": {assertion}, + } + w := postToken(t, server, params) + if w.Code != http.StatusOK { + t.Errorf("expected 200 with valid audience, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestM2M_JWTBearer_AudienceMismatch(t *testing.T) { + server := newM2MES256(t) + clientKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + pemStr := ecPublicKeyToPEM(t, &clientKey.PublicKey) + + // Require audience "test-issuer" but assertion will have "wrong-audience". + if err := server.AddTrustedKeyFromPEM("client-svc", pemStr, []string{"test-issuer"}, nil); err != nil { + t.Fatalf("AddTrustedKeyFromPEM: %v", err) + } + + claims := jwt.MapClaims{ + "iss": "client-svc", + "sub": "client-svc", + "aud": "wrong-audience", + "iat": time.Now().Unix(), + "exp": time.Now().Add(5 * time.Minute).Unix(), + } + tok := jwt.NewWithClaims(jwt.SigningMethodES256, claims) + assertion, err := tok.SignedString(clientKey) + if err != nil { + t.Fatalf("sign assertion: %v", err) + } + + params := url.Values{ + "grant_type": {GrantTypeJWTBearer}, + "assertion": {assertion}, + } + w := postToken(t, server, params) + if w.Code != http.StatusUnauthorized { + t.Errorf("expected 401 for audience mismatch, got %d", w.Code) + } +} + +func TestM2M_JWTBearer_ClaimMapping(t *testing.T) { + server := newM2MES256(t) + clientKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + pemStr := ecPublicKeyToPEM(t, &clientKey.PublicKey) + + // Map external claim "user_id" → local claim "ext_user". 
+ claimMapping := map[string]string{"user_id": "ext_user"} + if err := server.AddTrustedKeyFromPEM("client-svc", pemStr, nil, claimMapping); err != nil { + t.Fatalf("AddTrustedKeyFromPEM: %v", err) + } + + claims := jwt.MapClaims{ + "iss": "client-svc", + "sub": "client-svc", + "aud": "test-issuer", + "iat": time.Now().Unix(), + "exp": time.Now().Add(5 * time.Minute).Unix(), + "user_id": "u-42", + } + tok := jwt.NewWithClaims(jwt.SigningMethodES256, claims) + assertion, err := tok.SignedString(clientKey) + if err != nil { + t.Fatalf("sign assertion: %v", err) + } + + params := url.Values{ + "grant_type": {GrantTypeJWTBearer}, + "assertion": {assertion}, + } + w := postToken(t, server, params) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String()) + } + + // Parse the issued access token to verify claim mapping was applied. + var resp map[string]any + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("decode response: %v", err) + } + accessToken, _ := resp["access_token"].(string) + if accessToken == "" { + t.Fatal("no access_token in response") + } + + // Parse unverified to inspect claims. 
+ parser := new(jwt.Parser) + parsed, _, err := parser.ParseUnverified(accessToken, jwt.MapClaims{}) + if err != nil { + t.Fatalf("parse issued token: %v", err) + } + issuedClaims, _ := parsed.Claims.(jwt.MapClaims) + + if issuedClaims["ext_user"] != "u-42" { + t.Errorf("expected ext_user=u-42 in issued token, got %v", issuedClaims["ext_user"]) + } + if _, exists := issuedClaims["user_id"]; exists { + t.Error("expected user_id to be removed by claim mapping") + } +} + // --- DefaultExpiry / issuer defaults --- func TestM2M_DefaultExpiry(t *testing.T) { diff --git a/module/database.go b/module/database.go index 3ecdde67..a9ad31b1 100644 --- a/module/database.go +++ b/module/database.go @@ -236,9 +236,10 @@ func (w *WorkflowDatabase) Query(ctx context.Context, sqlStr string, args ...any row := make(map[string]any) for i, col := range columns { val := values[i] - // Convert byte slices to strings for readability + // Convert byte slices: try JSON parse first (handles PostgreSQL + // json/jsonb columns), fall back to string for non-JSON byte data. if b, ok := val.([]byte); ok { - row[col] = string(b) + row[col] = parseJSONBytesOrString(b) } else { row[col] = val } diff --git a/module/database_partitioned.go b/module/database_partitioned.go index d2f1687a..1729e237 100644 --- a/module/database_partitioned.go +++ b/module/database_partitioned.go @@ -7,6 +7,7 @@ import ( "regexp" "strings" "sync" + "time" "github.com/GoCodeAlone/modular" ) @@ -120,6 +121,17 @@ type PartitionedDatabaseConfig struct { // Defaults to PartitionKey if empty. SourceColumn string `json:"sourceColumn" yaml:"sourceColumn"` + // ── Lifecycle sync settings ─────────────────────────────────────────────── + // AutoSync controls whether SyncPartitionsFromSource is called automatically + // during Start(). Defaults to true when any sourceTable is configured. + // Set to false to disable automatic sync on startup. 
+ AutoSync *bool `json:"autoSync" yaml:"autoSync"` + // SyncInterval is a duration string (e.g. "60s", "5m") for periodic + // re-sync of partitions from the source table. When set, a background + // goroutine calls SyncPartitionsFromSource at this interval after Start(). + // Requires at least one sourceTable to be configured. Example: "60s". + SyncInterval string `json:"syncInterval" yaml:"syncInterval"` + // ── Multi-partition mode ───────────────────────────────────────────────── // Partitions lists independent partition key configurations. When non-empty, // the single-partition fields above are ignored. @@ -135,6 +147,11 @@ type PartitionedDatabase struct { partitions []PartitionConfig // normalized; always len >= 1 after construction base *WorkflowDatabase mu sync.RWMutex + logger modular.Logger + + // periodic sync state + syncStop chan struct{} + syncWg sync.WaitGroup } // normalizePartitionConfig applies defaults to a PartitionConfig and returns the result. @@ -190,6 +207,7 @@ func (p *PartitionedDatabase) Name() string { return p.name } // Init registers this module as a service. func (p *PartitionedDatabase) Init(app modular.Application) error { + p.logger = app.Logger() return app.RegisterService(p.name, p) } @@ -209,13 +227,93 @@ func (p *PartitionedDatabase) RequiresServices() []modular.ServiceDependency { return nil } -// Start opens the database connection during application startup. +// Start opens the database connection during application startup. When autoSync +// is enabled (the default when any sourceTable is configured), it calls +// SyncPartitionsFromSource to create partitions for all existing tenant values. +// When syncInterval is configured, a background goroutine periodically re-syncs +// partitions at that interval. func (p *PartitionedDatabase) Start(ctx context.Context) error { - return p.base.Start(ctx) + if err := p.base.Start(ctx); err != nil { + return err + } + + // Determine whether any partition config has a sourceTable. 
+ hasSourceTable := false + for _, cfg := range p.partitions { + if cfg.SourceTable != "" { + hasSourceTable = true + break + } + } + + // Auto-sync on startup: default true when sourceTable is configured. + autoSync := hasSourceTable + if p.config.AutoSync != nil { + autoSync = *p.config.AutoSync + } + + if autoSync && hasSourceTable { + if err := p.SyncPartitionsFromSource(ctx); err != nil { + // DB was opened; close it to avoid leaking the connection on startup failure. + _ = p.base.Stop(ctx) + return fmt.Errorf("partitioned database %q: auto-sync on startup failed: %w", p.name, err) + } + } + + // Start periodic sync goroutine if syncInterval is configured. + if p.config.SyncInterval != "" && hasSourceTable { + interval, err := time.ParseDuration(p.config.SyncInterval) + if err != nil { + // DB was opened; close it to avoid leaking the connection on startup failure. + _ = p.base.Stop(ctx) + return fmt.Errorf("partitioned database %q: invalid syncInterval %q: %w", p.name, p.config.SyncInterval, err) + } + if interval > 0 { + if p.base.DB() == nil { + // No database connection is available; starting the goroutine would + // produce repeated error logs with no useful work. + _ = p.base.Stop(ctx) + return fmt.Errorf("partitioned database %q: syncInterval requires an open database connection (is DSN configured?)", p.name) + } + p.syncStop = make(chan struct{}) + p.syncWg.Add(1) + go p.runPeriodicSync(ctx, interval) + } + } + + return nil +} + +// runPeriodicSync runs SyncPartitionsFromSource on a ticker until stopSync is +// closed or the parent context is cancelled. 
+func (p *PartitionedDatabase) runPeriodicSync(ctx context.Context, interval time.Duration) { + defer p.syncWg.Done() + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-p.syncStop: + return + case <-ctx.Done(): + return + case <-ticker.C: + if err := p.SyncPartitionsFromSource(ctx); err != nil { + if p.logger != nil { + p.logger.Error("partitioned database periodic sync failed", + "module", p.name, "error", err) + } + } + } + } } // Stop closes the database connection during application shutdown. func (p *PartitionedDatabase) Stop(ctx context.Context) error { + if p.syncStop != nil { + close(p.syncStop) + p.syncWg.Wait() + p.syncStop = nil + } return p.base.Stop(ctx) } diff --git a/module/database_partitioned_test.go b/module/database_partitioned_test.go index db884d8e..87a6945a 100644 --- a/module/database_partitioned_test.go +++ b/module/database_partitioned_test.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" "testing" + "time" ) func TestPartitionedDatabase_PartitionKey(t *testing.T) { @@ -838,3 +839,235 @@ func TestDBSyncPartitionsStep_NotPartitionManager(t *testing.T) { t.Fatal("expected error when service does not implement PartitionManager") } } + +// ─── Auto-sync and periodic sync tests ─────────────────────────────────────── + +// boolPtr is a test helper that returns a pointer to a bool value. +func boolPtr(v bool) *bool { return &v } + +func TestPartitionedDatabase_Start_NoSourceTable_NoSync(t *testing.T) { + // When no sourceTable is configured, Start should succeed without attempting sync. + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + // No DSN: base.Start is a no-op; no sourceTable: no sync attempted. 
+ } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + if err := pd.Start(context.Background()); err != nil { + t.Fatalf("unexpected Start error: %v", err) + } + _ = pd.Stop(context.Background()) +} + +func TestPartitionedDatabase_Start_AutoSyncDisabled_NoSync(t *testing.T) { + // When autoSync is explicitly false, Start should not call SyncPartitionsFromSource + // even when sourceTable is configured. + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SourceTable: "tenants", + AutoSync: boolPtr(false), + // No DSN: base.Start is a no-op; sourceTable set but autoSync=false. + } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + if err := pd.Start(context.Background()); err != nil { + t.Fatalf("unexpected Start error: %v", err) + } + _ = pd.Stop(context.Background()) +} + +func TestPartitionedDatabase_Start_AutoSyncEnabled_NilDB(t *testing.T) { + // When autoSync defaults to true and sourceTable is configured, Start must + // attempt SyncPartitionsFromSource. With no DB connection the sync returns + // "database connection is nil", which Start wraps and returns. + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SourceTable: "tenants", + // No DSN: base.Start is a no-op so DB stays nil. + // AutoSync not set: defaults to true when sourceTable is present. 
+ } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + err := pd.Start(context.Background()) + if err == nil { + t.Fatal("expected Start to return an error when DB connection is nil") + } + if !strings.Contains(err.Error(), "auto-sync on startup failed") { + t.Errorf("expected auto-sync error message, got: %v", err) + } +} + +func TestPartitionedDatabase_Start_InvalidSyncInterval(t *testing.T) { + // An invalid syncInterval string must cause Start to return a parse error. + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SourceTable: "tenants", + AutoSync: boolPtr(false), // skip startup sync so we reach interval parsing + SyncInterval: "not-a-duration", + } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + err := pd.Start(context.Background()) + if err == nil { + t.Fatal("expected Start to return an error for invalid syncInterval") + } + if !strings.Contains(err.Error(), "invalid syncInterval") { + t.Errorf("expected syncInterval parse error, got: %v", err) + } +} + +func TestPartitionedDatabase_SyncInterval_NoSourceTable_NoGoroutine(t *testing.T) { + // When syncInterval is set but no sourceTable is configured, no background + // goroutine is started (hasSourceTable=false gates the goroutine launch). + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SyncInterval: "100ms", + // No sourceTable: no goroutine should be started. 
+ } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + if err := pd.Start(context.Background()); err != nil { + t.Fatalf("unexpected Start error: %v", err) + } + + if pd.syncStop != nil { + t.Error("expected syncStop channel to be nil when no sourceTable is configured") + } + + if err := pd.Stop(context.Background()); err != nil { + t.Fatalf("unexpected Stop error: %v", err) + } +} + +func TestPartitionedDatabase_PeriodicSync_GoroutineLifecycle(t *testing.T) { + // When sourceTable is configured, autoSync is false, and syncInterval is set, + // a background goroutine must be launched. Stop must cleanly terminate it. + // Use sqlite so the DB connection is real (nil-DB guard requires an open connection). + cfg := PartitionedDatabaseConfig{ + Driver: "sqlite", + DSN: ":memory:", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SourceTable: "tenants", + AutoSync: boolPtr(false), // skip startup sync + SyncInterval: "100ms", + } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + if err := pd.Start(context.Background()); err != nil { + t.Fatalf("unexpected Start error: %v", err) + } + + if pd.syncStop == nil { + t.Fatal("expected syncStop channel to be set after Start with syncInterval") + } + + // Ensure Stop cleanly terminates the background goroutine without panic or deadlock. 
+ done := make(chan error, 1) + go func() { done <- pd.Stop(context.Background()) }() + + select { + case err := <-done: + if err != nil { + t.Errorf("unexpected Stop error: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatal("Stop did not return within 2 seconds") + } +} + +func TestPartitionedDatabase_AutoSync_DefaultTrueWhenSourceTableSet(t *testing.T) { + // Confirm that AutoSync==nil is treated as "true" when sourceTable is + // configured: Start must attempt sync (and fail with nil DB error). + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + SourceTable: "tenants", + // AutoSync is nil: should behave as true when sourceTable is present. + } + if cfg.AutoSync != nil { + t.Fatal("AutoSync must be nil for this test to be meaningful") + } + + pd := NewPartitionedDatabase("db", cfg) + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + err := pd.Start(context.Background()) + if err == nil { + t.Fatal("expected Start to fail when autoSync defaults to true and DB is nil") + } + if !strings.Contains(err.Error(), "auto-sync on startup failed") { + t.Errorf("expected auto-sync startup error, got: %v", err) + } +} + +func TestPartitionedDatabase_SyncInterval_NilDB_ReturnsError(t *testing.T) { + // When syncInterval is configured and sourceTable is set, but no DSN is + // provided (DB is nil), Start must return a clear error instead of starting + // a goroutine that would repeatedly fail and produce log noise. + cfg := PartitionedDatabaseConfig{ + Driver: "pgx", + PartitionKey: "tenant_id", + Tables: []string{"forms"}, + SourceTable: "tenants", + AutoSync: boolPtr(false), // skip startup sync to isolate interval check + SyncInterval: "100ms", + // No DSN: base.Start is a no-op → DB remains nil. 
+ } + pd := NewPartitionedDatabase("db", cfg) + + app := NewMockApplication() + if err := pd.Init(app); err != nil { + t.Fatalf("Init error: %v", err) + } + + err := pd.Start(context.Background()) + if err == nil { + t.Fatal("expected Start to return an error when syncInterval is set but DB is nil") + } + if !strings.Contains(err.Error(), "syncInterval requires an open database connection") { + t.Errorf("expected nil-DB syncInterval error, got: %v", err) + } +} diff --git a/module/database_scan_helpers.go b/module/database_scan_helpers.go new file mode 100644 index 00000000..dda45992 --- /dev/null +++ b/module/database_scan_helpers.go @@ -0,0 +1,41 @@ +package module + +import ( + "bytes" + "encoding/json" +) + +// parseJSONBytesOrString attempts to unmarshal b as JSON. If successful the +// parsed Go value is returned (map[string]any, []any, string, float64, bool, +// or nil). This transparently handles PostgreSQL json/jsonb columns, which the +// pgx driver delivers as raw JSON bytes rather than pre-typed Go values. +// +// A cheap leading-byte pre-check is applied first so that binary blobs (e.g. +// PostgreSQL bytea) skip the full JSON parser entirely and fall back to +// string conversion without incurring unnecessary CPU overhead. +// +// If b is not valid JSON (e.g. PostgreSQL bytea binary data), string(b) is +// returned so that the existing string-fallback behaviour is preserved. +func parseJSONBytesOrString(b []byte) any { + if len(b) == 0 { + return string(b) + } + // Quick check: JSON must start with one of these characters (after optional + // whitespace). Anything else is definitely not JSON and we avoid calling the + // full decoder on large binary blobs. 
+ trimmed := bytes.TrimLeft(b, " \t\r\n") + if len(trimmed) == 0 { + return string(b) + } + first := trimmed[0] + if first != '{' && first != '[' && first != '"' && + first != 't' && first != 'f' && first != 'n' && + first != '-' && (first < '0' || first > '9') { + return string(b) + } + var v any + if err := json.Unmarshal(b, &v); err == nil { + return v + } + return string(b) +} diff --git a/module/http_middleware.go b/module/http_middleware.go index 7eaa2ff5..e78128e0 100644 --- a/module/http_middleware.go +++ b/module/http_middleware.go @@ -6,6 +6,7 @@ import ( "math" "net" "net/http" + "net/url" "strconv" "strings" "sync" @@ -318,19 +319,59 @@ func (m *LoggingMiddleware) RequiresServices() []modular.ServiceDependency { return nil } +// CORSMiddlewareConfig holds configuration for the CORS middleware. +type CORSMiddlewareConfig struct { + // AllowedOrigins is the list of origins allowed to make cross-origin requests. + // Use "*" to allow all origins. Supports wildcard subdomain patterns like "*.example.com". + AllowedOrigins []string + // AllowedMethods is the list of HTTP methods allowed in CORS requests. + AllowedMethods []string + // AllowedHeaders is the list of HTTP headers allowed in CORS requests. + // Defaults to ["Content-Type", "Authorization"] when empty. + AllowedHeaders []string + // AllowCredentials indicates whether the request can include user credentials. + // When true, the actual request Origin is reflected (never "*"). + AllowCredentials bool + // MaxAge specifies how long (in seconds) the preflight response may be cached. + // Zero means no caching directive is sent. 
+ MaxAge int +} + // CORSMiddleware provides CORS support type CORSMiddleware struct { - name string - allowedOrigins []string - allowedMethods []string + name string + allowedOrigins []string + allowedMethods []string + allowedHeaders []string + allowCredentials bool + maxAge int } -// NewCORSMiddleware creates a new CORS middleware +// defaultCORSHeaders is the default set of allowed headers for backward compatibility. +var defaultCORSHeaders = []string{"Content-Type", "Authorization"} + +// NewCORSMiddleware creates a new CORS middleware with default allowed headers. func NewCORSMiddleware(name string, allowedOrigins, allowedMethods []string) *CORSMiddleware { + return NewCORSMiddlewareWithConfig(name, CORSMiddlewareConfig{ + AllowedOrigins: allowedOrigins, + AllowedMethods: allowedMethods, + }) +} + +// NewCORSMiddlewareWithConfig creates a new CORS middleware with full configuration. +// If AllowedHeaders is empty, it defaults to ["Content-Type", "Authorization"]. +func NewCORSMiddlewareWithConfig(name string, cfg CORSMiddlewareConfig) *CORSMiddleware { + headers := cfg.AllowedHeaders + if len(headers) == 0 { + headers = defaultCORSHeaders + } return &CORSMiddleware{ - name: name, - allowedOrigins: allowedOrigins, - allowedMethods: allowedMethods, + name: name, + allowedOrigins: cfg.AllowedOrigins, + allowedMethods: cfg.AllowedMethods, + allowedHeaders: headers, + allowCredentials: cfg.AllowCredentials, + maxAge: cfg.MaxAge, } } @@ -344,24 +385,49 @@ func (m *CORSMiddleware) Init(app modular.Application) error { return nil } +// corsOriginAllowed checks if the given origin is in the allowed list. +// It supports exact matching, "*" wildcard, and subdomain wildcards like "*.example.com". +// Wildcard patterns are matched against the parsed hostname only, so ports are handled correctly: +// "*.example.com" will match "http://sub.example.com:3000". 
+func corsOriginAllowed(origin string, allowedOrigins []string) bool { + if origin == "" { + return false + } + for _, allowed := range allowedOrigins { + if allowed == "*" || allowed == origin { + return true + } + // Wildcard subdomain matching: "*.example.com" matches "sub.example.com" (any port). + // Parse the request origin to extract just the hostname for comparison. + if strings.HasPrefix(allowed, "*.") { + suffix := allowed[1:] // ".example.com" + u, err := url.Parse(origin) + if err == nil && strings.HasSuffix(u.Hostname(), suffix) { + return true + } + } + } + return false +} + // Process implements middleware processing func (m *CORSMiddleware) Process(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { origin := r.Header.Get("Origin") - // Check if origin is allowed - allowed := false - for _, allowedOrigin := range m.allowedOrigins { - if allowedOrigin == "*" || allowedOrigin == origin { - allowed = true - break - } - } - - if allowed { + // Only apply CORS headers when the request includes an Origin header. + // Requests without Origin are not cross-origin requests and need no CORS response. 
+ if origin != "" && corsOriginAllowed(origin, m.allowedOrigins) { + w.Header().Add("Vary", "Origin") w.Header().Set("Access-Control-Allow-Origin", origin) w.Header().Set("Access-Control-Allow-Methods", strings.Join(m.allowedMethods, ", ")) - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization") + w.Header().Set("Access-Control-Allow-Headers", strings.Join(m.allowedHeaders, ", ")) + if m.allowCredentials { + w.Header().Set("Access-Control-Allow-Credentials", "true") + } + if m.maxAge > 0 { + w.Header().Set("Access-Control-Max-Age", strconv.Itoa(m.maxAge)) + } } // Handle preflight requests diff --git a/module/http_middleware_test.go b/module/http_middleware_test.go index 60e3f72d..bac86e7b 100644 --- a/module/http_middleware_test.go +++ b/module/http_middleware_test.go @@ -272,7 +272,212 @@ func TestCORSMiddleware_Process_Preflight(t *testing.T) { } } -func TestCORSMiddleware_ProvidesServices(t *testing.T) { +func TestCORSMiddlewareWithConfig_AllowedHeaders(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"http://localhost:3000"}, + AllowedMethods: []string{"GET", "POST"}, + AllowedHeaders: []string{"Content-Type", "Authorization", "X-CSRF-Token", "X-Request-Id"}, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://localhost:3000") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + got := rec.Header().Get("Access-Control-Allow-Headers") + want := "Content-Type, Authorization, X-CSRF-Token, X-Request-Id" + if got != want { + t.Errorf("expected Access-Control-Allow-Headers %q, got %q", want, got) + } +} + +func TestCORSMiddlewareWithConfig_DefaultHeaders(t *testing.T) { + // When AllowedHeaders is omitted, defaults to Content-Type and Authorization. 
+ m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"http://localhost:3000"}, + AllowedMethods: []string{"GET"}, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://localhost:3000") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + got := rec.Header().Get("Access-Control-Allow-Headers") + want := "Content-Type, Authorization" + if got != want { + t.Errorf("expected default Access-Control-Allow-Headers %q, got %q", want, got) + } +} + +func TestCORSMiddlewareWithConfig_AllowCredentials(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"http://app.example.com"}, + AllowedMethods: []string{"GET", "POST"}, + AllowCredentials: true, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://app.example.com") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Allow-Origin") != "http://app.example.com" { + t.Errorf("expected origin reflected, got %q", rec.Header().Get("Access-Control-Allow-Origin")) + } + if rec.Header().Get("Access-Control-Allow-Credentials") != "true" { + t.Errorf("expected Access-Control-Allow-Credentials: true, got %q", rec.Header().Get("Access-Control-Allow-Credentials")) + } +} + +func TestCORSMiddlewareWithConfig_NoCredentialsFlagNotSet(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"http://app.example.com"}, + AllowedMethods: []string{"GET"}, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := 
httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://app.example.com") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Allow-Credentials") != "" { + t.Errorf("expected no Access-Control-Allow-Credentials header, got %q", rec.Header().Get("Access-Control-Allow-Credentials")) + } +} + +func TestCORSMiddlewareWithConfig_MaxAge(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{"GET", "POST"}, + MaxAge: 3600, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://anything.com") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Max-Age") != "3600" { + t.Errorf("expected Access-Control-Max-Age: 3600, got %q", rec.Header().Get("Access-Control-Max-Age")) + } +} + +func TestCORSMiddlewareWithConfig_NoMaxAge(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{"GET"}, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://anything.com") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Max-Age") != "" { + t.Errorf("expected no Access-Control-Max-Age header, got %q", rec.Header().Get("Access-Control-Max-Age")) + } +} + +func TestCORSMiddlewareWithConfig_WildcardSubdomain(t *testing.T) { + m := NewCORSMiddlewareWithConfig("cors", CORSMiddlewareConfig{ + AllowedOrigins: []string{"*.example.com"}, + AllowedMethods: []string{"GET"}, + }) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + w.WriteHeader(http.StatusOK) + })) + + tests := []struct { + origin string + allowed bool + }{ + {"http://app.example.com", true}, + {"http://admin.example.com", true}, + // Port should be handled correctly via hostname parsing + {"http://app.example.com:3000", true}, + {"http://evil.com", false}, + {"http://notexample.com", false}, + // Empty origin must not match wildcard + {"", false}, + } + + for _, tt := range tests { + req := httptest.NewRequest("GET", "/test", nil) + if tt.origin != "" { + req.Header.Set("Origin", tt.origin) + } + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + hasHeader := rec.Header().Get("Access-Control-Allow-Origin") != "" + if hasHeader != tt.allowed { + t.Errorf("origin %q: expected allowed=%v, got header=%q", tt.origin, tt.allowed, rec.Header().Get("Access-Control-Allow-Origin")) + } + } +} + +func TestCORSMiddleware_VaryHeader(t *testing.T) { + m := NewCORSMiddleware("cors", []string{"http://localhost:3000"}, []string{"GET"}) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://localhost:3000") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Vary") != "Origin" { + t.Errorf("expected Vary: Origin header, got %q", rec.Header().Get("Vary")) + } +} + +func TestCORSMiddleware_EmptyOriginSkipped(t *testing.T) { + // When no Origin header is sent, CORS headers must not be set. 
+ m := NewCORSMiddleware("cors", []string{"*"}, []string{"GET"}) + + handler := m.Process(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + // No Origin header set + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Allow-Origin") != "" { + t.Errorf("expected no CORS headers when Origin is absent, got %q", rec.Header().Get("Access-Control-Allow-Origin")) + } +} + +func TestCORSMiddlewareWithConfig_ProvidesServices(t *testing.T) { m := NewCORSMiddleware("cors-mw", nil, nil) svcs := m.ProvidesServices() if len(svcs) != 1 { diff --git a/module/openapi.go b/module/openapi.go index b665c23f..2d541cc9 100644 --- a/module/openapi.go +++ b/module/openapi.go @@ -24,8 +24,9 @@ import ( // OpenAPIValidationConfig controls which request/response parts are validated. type OpenAPIValidationConfig struct { - Request bool `yaml:"request" json:"request"` - Response bool `yaml:"response" json:"response"` + Request bool `yaml:"request" json:"request"` + Response bool `yaml:"response" json:"response"` + ResponseAction string `yaml:"response_action" json:"response_action"` // "warn" (default) or "error" } // OpenAPISwaggerUIConfig controls Swagger UI hosting. @@ -97,21 +98,26 @@ type openAPIMediaType struct { // openAPIResponse describes a single response entry. type openAPIResponse struct { - Description string `yaml:"description" json:"description"` + Description string `yaml:"description" json:"description"` + Content map[string]openAPIMediaType `yaml:"content" json:"content"` } // openAPISchema is a minimal JSON Schema subset used for parameter/body validation. 
type openAPISchema struct { - Type string `yaml:"type" json:"type"` - Required []string `yaml:"required" json:"required"` - Properties map[string]*openAPISchema `yaml:"properties" json:"properties"` - Format string `yaml:"format" json:"format"` - Minimum *float64 `yaml:"minimum" json:"minimum"` - Maximum *float64 `yaml:"maximum" json:"maximum"` - MinLength *int `yaml:"minLength" json:"minLength"` - MaxLength *int `yaml:"maxLength" json:"maxLength"` - Pattern string `yaml:"pattern" json:"pattern"` - Enum []any `yaml:"enum" json:"enum"` + Type string `yaml:"type" json:"type"` + Required []string `yaml:"required" json:"required"` + Properties map[string]*openAPISchema `yaml:"properties" json:"properties"` + Format string `yaml:"format" json:"format"` + Minimum *float64 `yaml:"minimum" json:"minimum"` + Maximum *float64 `yaml:"maximum" json:"maximum"` + MinLength *int `yaml:"minLength" json:"minLength"` + MaxLength *int `yaml:"maxLength" json:"maxLength"` + Pattern string `yaml:"pattern" json:"pattern"` + Enum []any `yaml:"enum" json:"enum"` + Items *openAPISchema `yaml:"items" json:"items"` + MinItems *int `yaml:"minItems" json:"minItems"` + MaxItems *int `yaml:"maxItems" json:"maxItems"` + AdditionalProperties *openAPISchema `yaml:"additionalProperties" json:"additionalProperties"` } // ---- OpenAPIModule ---- @@ -289,15 +295,24 @@ func (m *OpenAPIModule) RegisterRoutes(router HTTPRouter) { // buildRouteHandler creates an HTTPHandler that validates the request (if enabled) // and either executes the linked pipeline (if x-pipeline is set) or returns a 501 -// Not Implemented stub response. +// Not Implemented stub response. When response validation is enabled, the handler +// checks the outgoing response body against the OpenAPI response schema and either +// logs a warning or returns a 500 error depending on the response_action setting. 
func (m *OpenAPIModule) buildRouteHandler(specPath, method string, op *openAPIOperation) HTTPHandler { validateReq := m.cfg.Validation.Request + validateResp := m.cfg.Validation.Response + responseAction := m.cfg.Validation.ResponseAction + if responseAction == "" { + responseAction = "warn" + } h := &openAPIRouteHandler{ - module: m, - specPath: specPath, - method: method, - op: op, - validateReq: validateReq, + module: m, + specPath: specPath, + method: method, + op: op, + validateReq: validateReq, + validateResp: validateResp, + responseAction: responseAction, } if op.XPipeline != "" { h.pipelineName = op.XPipeline @@ -321,6 +336,8 @@ type openAPIRouteHandler struct { method string op *openAPIOperation validateReq bool + validateResp bool + responseAction string // "warn" or "error" pipelineName string pipelineLookup PipelineLookupFn } @@ -361,7 +378,16 @@ func (h *openAPIRouteHandler) Handle(w http.ResponseWriter, r *http.Request) { data := openAPIExtractRequestData(r) - rw := &trackedResponseWriter{ResponseWriter: w} + // When response validation is enabled, wrap the writer with a capturing + // writer so we can inspect the response body/status before sending. + var cw *responseCapturingWriter + var rw *trackedResponseWriter + if h.validateResp { + cw = newResponseCapturingWriter(w) + rw = &trackedResponseWriter{ResponseWriter: cw} + } else { + rw = &trackedResponseWriter{ResponseWriter: w} + } ctx := context.WithValue(r.Context(), HTTPResponseWriterContextKey, rw) ctx = context.WithValue(ctx, HTTPRequestContextKey, r) @@ -376,20 +402,72 @@ func (h *openAPIRouteHandler) Handle(w http.ResponseWriter, r *http.Request) { _ = json.NewEncoder(w).Encode(map[string]string{ "error": fmt.Sprintf("pipeline execution failed: %v", err), }) + } else if cw != nil { + cw.flush() } return } if rw.written { + // Pipeline wrote directly to the response writer. + if cw != nil { + // Validate the captured response before flushing. 
+ if respErrs := h.validateResponse(cw.statusCode, cw.Header(), cw.body.Bytes()); len(respErrs) > 0 { + if h.responseAction == "error" { + // Discard the buffered response and return a 500 with validation errors. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _ = json.NewEncoder(w).Encode(map[string]any{ + "error": "response validation failed", + "errors": respErrs, + }) + return + } + h.module.logger.Warn("OpenAPI response validation failed", + "module", h.module.name, + "path", h.specPath, + "method", h.method, + "errors", respErrs, + ) + } + cw.flush() + } return } // If the pipeline set response_status in its output (without writing // directly to the response writer), use those values to build the response. - if writePipelineContextResponse(w, result.Current) { - return + if h.validateResp { + if h.writeAndValidatePipelineResponse(w, result.Current) { + return + } + } else { + if writePipelineContextResponse(w, result.Current) { + return + } } + // Default: 200 with JSON-encoded pipeline state. 
+ respBody, _ := json.Marshal(result.Current) + if h.validateResp { + if respErrs := h.validateResponse(http.StatusOK, http.Header{"Content-Type": []string{"application/json"}}, respBody); len(respErrs) > 0 { + if h.responseAction == "error" { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _ = json.NewEncoder(w).Encode(map[string]any{ + "error": "response validation failed", + "errors": respErrs, + }) + return + } + h.module.logger.Warn("OpenAPI response validation failed", + "module", h.module.name, + "path", h.specPath, + "method", h.method, + "errors", respErrs, + ) + } + } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(result.Current) @@ -491,7 +569,7 @@ func (h *openAPIRouteHandler) validate(r *http.Request) []string { var bodyData any if jsonErr := json.Unmarshal(bodyBytes, &bodyData); jsonErr != nil { errs = append(errs, fmt.Sprintf("request body contains invalid JSON: %v", jsonErr)) - } else if bodyErrs := validateJSONValue(bodyData, "body", mediaType.Schema); len(bodyErrs) > 0 { + } else if bodyErrs := validateJSONValue(bodyData, "request body", mediaType.Schema); len(bodyErrs) > 0 { errs = append(errs, bodyErrs...) } } @@ -502,6 +580,200 @@ func (h *openAPIRouteHandler) validate(r *http.Request) []string { return errs } +// ---- Response validation ---- + +// responseCapturingWriter buffers the response body, status code, and headers +// so we can validate them against the OpenAPI spec before sending to the client. +// It uses its own header map to prevent leaked headers reaching the client when +// validation fails and a different (500) response needs to be sent. 
+type responseCapturingWriter struct { + underlying http.ResponseWriter + headers http.Header // own header map; copied to underlying only on flush + body bytes.Buffer + statusCode int + headerSent bool +} + +func newResponseCapturingWriter(w http.ResponseWriter) *responseCapturingWriter { + return &responseCapturingWriter{ + underlying: w, + headers: make(http.Header), + statusCode: http.StatusOK, + } +} + +// Header returns this writer's own header map so that callers can set headers +// which are only forwarded to the underlying writer when flush() is called. +func (c *responseCapturingWriter) Header() http.Header { + return c.headers +} + +// Write captures the response body into an internal buffer. +func (c *responseCapturingWriter) Write(b []byte) (int, error) { + return c.body.Write(b) +} + +// WriteHeader captures the status code without sending it yet. +func (c *responseCapturingWriter) WriteHeader(code int) { + c.statusCode = code +} + +// flush copies captured headers and sends the buffered status code and body to the underlying writer. +func (c *responseCapturingWriter) flush() { + if c.headerSent { + return + } + c.headerSent = true + // Copy captured headers to the underlying writer before sending the status code. + for k, vals := range c.headers { + for _, v := range vals { + c.underlying.Header().Add(k, v) + } + } + c.underlying.WriteHeader(c.statusCode) + _, _ = c.underlying.Write(c.body.Bytes()) //nolint:gosec // G705: body is pipeline output, written back to same response +} + +// validateResponse validates the response status code, content type, and body +// against the OpenAPI spec for this operation. Returns a list of validation errors. +func (h *openAPIRouteHandler) validateResponse(statusCode int, headers http.Header, body []byte) []string { + var errs []string + + if h.op.Responses == nil { + return nil + } + + // Look up the response spec by exact status code, then fall back to "default". 
+ statusStr := strconv.Itoa(statusCode) + respSpec, ok := h.op.Responses[statusStr] + if !ok { + // Try wildcard status codes: 2XX, 3XX, etc. + wildcardStatus := string(statusStr[0]) + "XX" + respSpec, ok = h.op.Responses[wildcardStatus] + } + if !ok { + respSpec, ok = h.op.Responses["default"] + } + if !ok { + // No spec defined for this status code — nothing to validate. + return nil + } + + // If no content is defined in the response spec, skip body validation. + if len(respSpec.Content) == 0 { + return nil + } + + // Determine the response content type. + ct := headers.Get("Content-Type") + if idx := strings.Index(ct, ";"); idx >= 0 { + ct = strings.TrimSpace(ct[:idx]) + } + if ct == "" { + ct = "application/json" // default assumption for JSON APIs + } + + mediaType, ok := respSpec.Content[ct] + if !ok { + // Try wildcard content types (e.g., application/*) + for specCT, mt := range respSpec.Content { + if strings.HasSuffix(specCT, "/*") { + prefix := strings.TrimSuffix(specCT, "*") + if strings.HasPrefix(ct, prefix) { + mediaType = mt + ok = true + break + } + } + } + } + if !ok { + errs = append(errs, fmt.Sprintf("response Content-Type %q not defined in spec; spec defines: %s", + ct, supportedContentTypes(respSpec.Content))) + return errs + } + + if mediaType.Schema == nil || len(body) == 0 { + return nil + } + + // Parse and validate the response body against the schema. + var bodyData any + if jsonErr := json.Unmarshal(body, &bodyData); jsonErr != nil { + errs = append(errs, fmt.Sprintf("response body contains invalid JSON: %v", jsonErr)) + return errs + } + + if bodyErrs := validateJSONValue(bodyData, "response body", mediaType.Schema); len(bodyErrs) > 0 { + errs = append(errs, bodyErrs...) + } + + return errs +} + +// writeAndValidatePipelineResponse is like writePipelineContextResponse but also +// validates the response against the OpenAPI spec when response validation is enabled. 
+func (h *openAPIRouteHandler) writeAndValidatePipelineResponse(w http.ResponseWriter, result map[string]any) bool { + rawStatus, ok := result["response_status"] + if !ok { + return false + } + status, ok := coercePipelineStatus(rawStatus) + if !ok { + return false + } + + hdrs := http.Header{} + if rawHeaders, ok := result["response_headers"]; ok { + // Build a temporary header map for validation + switch hv := rawHeaders.(type) { + case map[string]any: + for k, v := range hv { + hdrs.Set(k, fmt.Sprintf("%v", v)) + } + case map[string]string: + for k, v := range hv { + hdrs.Set(k, v) + } + case http.Header: + hdrs = hv + } + } + + var bodyBytes []byte + if body, ok := result["response_body"].(string); ok { + bodyBytes = []byte(body) + } + + if respErrs := h.validateResponse(status, hdrs, bodyBytes); len(respErrs) > 0 { + if h.responseAction == "error" { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _ = json.NewEncoder(w).Encode(map[string]any{ + "error": "response validation failed", + "errors": respErrs, + }) + return true + } + h.module.logger.Warn("OpenAPI response validation failed", + "module", h.module.name, + "path", h.specPath, + "method", h.method, + "errors", respErrs, + ) + } + + // Write the actual response + if rawHeaders, ok := result["response_headers"]; ok { + applyPipelineHeaders(w, rawHeaders) + } + w.WriteHeader(status) + if body, ok := result["response_body"].(string); ok { + _, _ = w.Write([]byte(body)) + } + return true +} + // ---- openAPISpecHandler ---- type openAPISpecHandler struct { @@ -680,19 +952,21 @@ func validateScalarValue(val, name, kind string, schema *openAPISchema) []string } // validateJSONBody validates a decoded JSON body against an object schema. -func validateJSONBody(body any, schema *openAPISchema) []string { +// The bodyLabel parameter (e.g. "request body" or "response body") is used in +// error messages to distinguish validation context. 
+func validateJSONBody(body any, schema *openAPISchema, bodyLabel string) []string { var errs []string obj, ok := body.(map[string]any) if !ok { if schema.Type == "object" { - return []string{"request body must be a JSON object"} + return []string{bodyLabel + " must be a JSON object"} } return nil } // Check required fields for _, req := range schema.Required { if _, present := obj[req]; !present { - errs = append(errs, fmt.Sprintf("request body: required field %q is missing", req)) + errs = append(errs, fmt.Sprintf("%s: required field %q is missing", bodyLabel, req)) } } // Validate individual properties @@ -705,6 +979,18 @@ func validateJSONBody(body any, schema *openAPISchema) []string { errs = append(errs, fieldErrs...) } } + // Validate additionalProperties: keys not declared in Properties are checked + // against the additionalProperties schema when it is specified. + if schema.AdditionalProperties != nil { + for key, val := range obj { + if _, defined := schema.Properties[key]; defined { + continue + } + if fieldErrs := validateJSONValue(val, key, schema.AdditionalProperties); len(fieldErrs) > 0 { + errs = append(errs, fieldErrs...) + } + } + } return errs } @@ -776,9 +1062,28 @@ func validateJSONValue(val any, name string, schema *openAPISchema) []string { errs = append(errs, fmt.Sprintf("field %q must be a boolean, got %T", name, val)) } case "object": - if subErrs := validateJSONBody(val, schema); len(subErrs) > 0 { + if subErrs := validateJSONBody(val, schema, name); len(subErrs) > 0 { errs = append(errs, subErrs...) 
} + case "array": + arr, ok := val.([]any) + if !ok { + return []string{fmt.Sprintf("field %q must be an array, got %T", name, val)} + } + if schema.MinItems != nil && len(arr) < *schema.MinItems { + errs = append(errs, fmt.Sprintf("field %q must have at least %d items, got %d", name, *schema.MinItems, len(arr))) + } + if schema.MaxItems != nil && len(arr) > *schema.MaxItems { + errs = append(errs, fmt.Sprintf("field %q must have at most %d items, got %d", name, *schema.MaxItems, len(arr))) + } + if schema.Items != nil { + for i, item := range arr { + itemName := fmt.Sprintf("%s[%d]", name, i) + if itemErrs := validateJSONValue(item, itemName, schema.Items); len(itemErrs) > 0 { + errs = append(errs, itemErrs...) + } + } + } } // Enum validation: use type-aware comparison to prevent e.g. int 1 matching string "1". if len(schema.Enum) > 0 { diff --git a/module/openapi_test.go b/module/openapi_test.go index f9c22fb2..276bad30 100644 --- a/module/openapi_test.go +++ b/module/openapi_test.go @@ -1400,3 +1400,997 @@ func TestOpenAPIModule_XPipeline_ResponseStatus_Float64(t *testing.T) { t.Errorf("unexpected body: %q", w.Body.String()) } } + +// ---- Response validation spec fixtures ---- + +const responseValidationYAML = ` +openapi: "3.0.0" +info: + title: Response Validation API + version: "1.0.0" +paths: + /pets: + get: + operationId: listPets + summary: List all pets + x-pipeline: list-pets-pipeline + responses: + "200": + description: A list of pets + content: + application/json: + schema: + type: array + items: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + minLength: 1 + tag: + type: string + post: + operationId: createPet + summary: Create a pet + x-pipeline: create-pet-pipeline + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - name + properties: + name: + type: string + responses: + "201": + description: Created pet + content: + application/json: + 
schema: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + /pets/{petId}: + get: + operationId: getPet + summary: Get a pet by ID + x-pipeline: get-pet-pipeline + parameters: + - name: petId + in: path + required: true + schema: + type: integer + responses: + "200": + description: A single pet + content: + application/json: + schema: + type: object + required: + - id + - name + properties: + id: + type: integer + name: + type: string + tag: + type: string + "404": + description: Pet not found + content: + application/json: + schema: + type: object + required: + - error + properties: + error: + type: string + /no-response-schema: + get: + operationId: noSchema + summary: Endpoint with no response schema + x-pipeline: no-schema-pipeline + responses: + "200": + description: No content schema defined +` + +// JSON:API style response spec for complex validation scenarios +const jsonAPIResponseYAML = ` +openapi: "3.0.0" +info: + title: JSON:API Response Validation + version: "1.0.0" +paths: + /articles: + get: + operationId: listArticles + summary: List articles (JSON:API format) + x-pipeline: list-articles-pipeline + responses: + "200": + description: JSON:API compliant response + content: + application/vnd.api+json: + schema: + type: object + required: + - data + properties: + data: + type: array + items: + type: object + required: + - type + - id + - attributes + properties: + type: + type: string + id: + type: string + attributes: + type: object + required: + - title + properties: + title: + type: string + body: + type: string + relationships: + type: object + properties: + author: + type: object + properties: + data: + type: object + required: + - type + - id + properties: + type: + type: string + id: + type: string + meta: + type: object + properties: + total: + type: integer + links: + type: object + properties: + self: + type: string + next: + type: string +` + +// ---- Response validation tests ---- + +func 
TestOpenAPIModule_ResponseValidation_ValidResponse(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline returns a valid array of pets + step := &stubPipelineStep{ + name: "list-pets", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[{"id":1,"name":"Fido","tag":"dog"},{"id":2,"name":"Whiskers"}]`, + "response_headers": map[string]any{ + "Content-Type": "application/json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-pets-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-pets-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/pets") + if h == nil { + t.Fatal("GET /api/pets handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/pets", nil) + h.Handle(w, r) + + if w.Code != http.StatusOK { + t.Errorf("expected 200, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_InvalidResponse_ErrorAction(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline returns a pet missing the required "name" field + step := &stubPipelineStep{ + name: "list-pets", + exec: func(_ context.Context, _ 
*PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[{"id":1}]`, + "response_headers": map[string]any{ + "Content-Type": "application/json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-pets-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-pets-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/pets") + if h == nil { + t.Fatal("GET /api/pets handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/pets", nil) + h.Handle(w, r) + + // With action=error, we expect a 500 response with validation errors + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500, got %d: %s", w.Code, w.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("expected JSON error body: %v", err) + } + if resp["error"] != "response validation failed" { + t.Errorf("expected 'response validation failed' error, got %v", resp["error"]) + } + errs, ok := resp["errors"].([]any) + if !ok || len(errs) == 0 { + t.Errorf("expected validation errors, got %v", resp["errors"]) + } +} + +func TestOpenAPIModule_ResponseValidation_InvalidResponse_WarnAction(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "warn"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline returns a pet missing the required "name" field + step := &stubPipelineStep{ + name: "list-pets", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + 
Output: map[string]any{ + "response_status": 200, + "response_body": `[{"id":1}]`, + "response_headers": map[string]any{ + "Content-Type": "application/json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-pets-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-pets-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/pets") + if h == nil { + t.Fatal("GET /api/pets handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/pets", nil) + h.Handle(w, r) + + // With action=warn, the response should still be sent (200) + if w.Code != http.StatusOK { + t.Errorf("expected 200 (warning only), got %d: %s", w.Code, w.Body.String()) + } + // Body should be the original pipeline body + if w.Body.String() != `[{"id":1}]` { + t.Errorf("expected original pipeline body, got %q", w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_DefaultFallback_InvalidFallback(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline returns output without response_status — falls through to 200 default + step := &stubPipelineStep{ + name: "list-pets", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "result": []any{ + map[string]any{"id": float64(1), "name": "Fido"}, + }, + }, + }, nil + }, + } + pipe := &Pipeline{Name: "list-pets-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-pets-pipeline" { + 
return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/pets") + if h == nil { + t.Fatal("GET /api/pets handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/pets", nil) + h.Handle(w, r) + + // The spec expects an array at the top level, but we're sending an object + // (the full pipeline state). This should fail validation in error mode. + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 (response is object, spec expects array), got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_NoSchema_Passes(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + step := &stubPipelineStep{ + name: "no-schema", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `{"anything":"goes"}`, + "response_headers": map[string]any{ + "Content-Type": "application/json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "no-schema-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "no-schema-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/no-response-schema") + if h == nil { + t.Fatal("GET /api/no-response-schema handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/no-response-schema", nil) + h.Handle(w, r) + + // No schema defined — response should pass through 
+ if w.Code != http.StatusOK { + t.Errorf("expected 200 (no schema to validate against), got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_JSONAPI_ValidResponse(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", jsonAPIResponseYAML) + + mod := NewOpenAPIModule("jsonapi", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // A valid JSON:API response + validBody := `{ + "data": [ + { + "type": "articles", + "id": "1", + "attributes": { + "title": "Hello World", + "body": "This is my first article." + }, + "relationships": { + "author": { + "data": {"type": "people", "id": "42"} + } + } + } + ], + "meta": {"total": 1}, + "links": {"self": "/articles"} + }` + + step := &stubPipelineStep{ + name: "list-articles", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": validBody, + "response_headers": map[string]any{ + "Content-Type": "application/vnd.api+json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-articles-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-articles-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/articles") + if h == nil { + t.Fatal("GET /api/articles handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/articles", nil) + h.Handle(w, r) + + if w.Code != http.StatusOK { + t.Errorf("expected 200 for valid JSON:API response, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_JSONAPI_InvalidResponse(t *testing.T) { + specPath := 
writeTempSpec(t, ".yaml", jsonAPIResponseYAML) + + mod := NewOpenAPIModule("jsonapi", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Invalid JSON:API response — missing required "type" and "attributes" in data items + invalidBody := `{ + "data": [ + { + "id": "1" + } + ] + }` + + step := &stubPipelineStep{ + name: "list-articles", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": invalidBody, + "response_headers": map[string]any{ + "Content-Type": "application/vnd.api+json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-articles-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-articles-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/articles") + if h == nil { + t.Fatal("GET /api/articles handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/articles", nil) + h.Handle(w, r) + + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 for invalid JSON:API response, got %d: %s", w.Code, w.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("expected JSON error body: %v", err) + } + errs, ok := resp["errors"].([]any) + if !ok || len(errs) == 0 { + t.Fatalf("expected validation errors, got %v", resp["errors"]) + } + + // Check that it caught missing required fields + errStr := strings.Join(func() []string { + ss := make([]string, len(errs)) + for i, e := range errs { + ss[i] = e.(string) + } + return ss + }(), " ") + if 
!strings.Contains(errStr, "type") { + t.Errorf("expected error about missing 'type' field, got: %s", errStr) + } + if !strings.Contains(errStr, "attributes") { + t.Errorf("expected error about missing 'attributes' field, got: %s", errStr) + } +} + +func TestOpenAPIModule_ResponseValidation_JSONAPI_WrongContentType(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", jsonAPIResponseYAML) + + mod := NewOpenAPIModule("jsonapi", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Response with wrong content type (application/json instead of application/vnd.api+json) + step := &stubPipelineStep{ + name: "list-articles", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `{"data":[]}`, + "response_headers": map[string]any{ + "Content-Type": "application/json", + }, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-articles-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-articles-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/articles") + if h == nil { + t.Fatal("GET /api/articles handler not found") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/articles", nil) + h.Handle(w, r) + + // Should fail because the Content-Type doesn't match the spec + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 for wrong content type, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_DirectWrite(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := 
NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline step writes directly to the response writer with an invalid response + step := &stubPipelineStep{ + name: "create-pet", + exec: func(ctx context.Context, pc *PipelineContext) (*StepResult, error) { + rw := ctx.Value(HTTPResponseWriterContextKey).(http.ResponseWriter) + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusCreated) + _, _ = rw.Write([]byte(`{"wrong":"fields"}`)) + return &StepResult{Output: map[string]any{}}, nil + }, + } + pipe := &Pipeline{Name: "create-pet-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "create-pet-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("POST", "/api/pets") + if h == nil { + t.Fatal("POST /api/pets handler not found") + } + + w := httptest.NewRecorder() + body := strings.NewReader(`{"name":"Fido"}`) + r := httptest.NewRequest(http.MethodPost, "/api/pets", body) + r.Header.Set("Content-Type", "application/json") + h.Handle(w, r) + + // With error action, the invalid response should be blocked + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 for invalid direct-write response, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_DirectWrite_Valid(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + // Pipeline step writes a valid response 
directly + step := &stubPipelineStep{ + name: "create-pet", + exec: func(ctx context.Context, pc *PipelineContext) (*StepResult, error) { + rw := ctx.Value(HTTPResponseWriterContextKey).(http.ResponseWriter) + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusCreated) + _, _ = rw.Write([]byte(`{"id":1,"name":"Fido"}`)) + return &StepResult{Output: map[string]any{}}, nil + }, + } + pipe := &Pipeline{Name: "create-pet-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "create-pet-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("POST", "/api/pets") + if h == nil { + t.Fatal("POST /api/pets handler not found") + } + + w := httptest.NewRecorder() + body := strings.NewReader(`{"name":"Fido"}`) + r := httptest.NewRequest(http.MethodPost, "/api/pets", body) + r.Header.Set("Content-Type", "application/json") + h.Handle(w, r) + + if w.Code != http.StatusCreated { + t.Errorf("expected 201 for valid response, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestOpenAPIModule_ResponseValidation_ArrayConstraints(t *testing.T) { + const arrayConstraintYAML = ` +openapi: "3.0.0" +info: + title: Array Constraint API + version: "1.0.0" +paths: + /items: + get: + operationId: listItems + x-pipeline: list-items + responses: + "200": + description: Items list + content: + application/json: + schema: + type: array + minItems: 1 + maxItems: 3 + items: + type: object + required: + - name + properties: + name: + type: string +` + specPath := writeTempSpec(t, ".yaml", arrayConstraintYAML) + + t.Run("too_few_items", func(t *testing.T) { + mod := NewOpenAPIModule("arr-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + step := 
&stubPipelineStep{ + name: "list-items", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[]`, + "response_headers": map[string]any{"Content-Type": "application/json"}, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-items", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-items" { + return pipe, true + } + return nil, false + }) + router := &testRouter{} + mod.RegisterRoutes(router) + h := router.findHandler("GET", "/api/items") + if h == nil { + t.Fatal("route GET /api/items not registered") + } + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/items", nil) + h.Handle(w, r) + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 for too few items, got %d: %s", w.Code, w.Body.String()) + } + }) + + t.Run("too_many_items", func(t *testing.T) { + mod := NewOpenAPIModule("arr-api2", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + step := &stubPipelineStep{ + name: "list-items", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[{"name":"a"},{"name":"b"},{"name":"c"},{"name":"d"}]`, + "response_headers": map[string]any{"Content-Type": "application/json"}, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-items", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-items" { + return pipe, true + } + return nil, false + }) + router := &testRouter{} + mod.RegisterRoutes(router) + h := router.findHandler("GET", "/api/items") + if h == nil { + t.Fatal("route GET /api/items 
not registered") + } + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/items", nil) + h.Handle(w, r) + if w.Code != http.StatusInternalServerError { + t.Errorf("expected 500 for too many items, got %d: %s", w.Code, w.Body.String()) + } + }) + + t.Run("valid_array", func(t *testing.T) { + mod := NewOpenAPIModule("arr-api3", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true, ResponseAction: "error"}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + step := &stubPipelineStep{ + name: "list-items", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[{"name":"a"},{"name":"b"}]`, + "response_headers": map[string]any{"Content-Type": "application/json"}, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-items", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-items" { + return pipe, true + } + return nil, false + }) + router := &testRouter{} + mod.RegisterRoutes(router) + h := router.findHandler("GET", "/api/items") + if h == nil { + t.Fatal("route GET /api/items not registered") + } + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/items", nil) + h.Handle(w, r) + if w.Code != http.StatusOK { + t.Errorf("expected 200 for valid array, got %d: %s", w.Code, w.Body.String()) + } + }) +} + +func TestOpenAPIModule_ResponseValidation_DefaultAction_IsWarn(t *testing.T) { + specPath := writeTempSpec(t, ".yaml", responseValidationYAML) + + // No ResponseAction specified — should default to "warn" + mod := NewOpenAPIModule("resp-api", OpenAPIConfig{ + SpecFile: specPath, + BasePath: "/api", + Validation: OpenAPIValidationConfig{Response: true}, + }) + if err := mod.Init(nil); err != nil { + t.Fatalf("Init: %v", err) + } + + step := 
&stubPipelineStep{ + name: "list-pets", + exec: func(_ context.Context, _ *PipelineContext) (*StepResult, error) { + return &StepResult{ + Output: map[string]any{ + "response_status": 200, + "response_body": `[{"id":1}]`, // missing required "name" + "response_headers": map[string]any{"Content-Type": "application/json"}, + }, + Stop: true, + }, nil + }, + } + pipe := &Pipeline{Name: "list-pets-pipeline", Steps: []PipelineStep{step}} + mod.SetPipelineLookup(func(name string) (*Pipeline, bool) { + if name == "list-pets-pipeline" { + return pipe, true + } + return nil, false + }) + + router := &testRouter{} + mod.RegisterRoutes(router) + + h := router.findHandler("GET", "/api/pets") + if h == nil { + t.Fatal("handler for GET /api/pets not found") + } + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/pets", nil) + h.Handle(w, r) + + // Default action is warn, so response should pass through + if w.Code != http.StatusOK { + t.Errorf("expected 200 with default warn action, got %d: %s", w.Code, w.Body.String()) + } +} diff --git a/module/pipeline_step_db_query.go b/module/pipeline_step_db_query.go index 9de097c1..cd94ff64 100644 --- a/module/pipeline_step_db_query.go +++ b/module/pipeline_step_db_query.go @@ -193,9 +193,11 @@ func (s *DBQueryStep) Execute(_ context.Context, pc *PipelineContext) (*StepResu row := make(map[string]any, len(columns)) for i, col := range columns { val := values[i] - // Convert []byte to string for readability + // Convert []byte: try JSON parse first (handles PostgreSQL json/jsonb + // column types returned by the pgx driver as raw JSON bytes), then + // fall back to string conversion for non-JSON byte data (e.g. bytea). 
if b, ok := val.([]byte); ok { - row[col] = string(b) + row[col] = parseJSONBytesOrString(b) } else { row[col] = val } diff --git a/module/pipeline_step_db_query_cached.go b/module/pipeline_step_db_query_cached.go index beaf36dd..fe1fad91 100644 --- a/module/pipeline_step_db_query_cached.go +++ b/module/pipeline_step_db_query_cached.go @@ -249,7 +249,7 @@ func (s *DBQueryCachedStep) runQuery(ctx context.Context, pc *PipelineContext) ( } val := values[i] if b, ok := val.([]byte); ok { - row[col] = string(b) + row[col] = parseJSONBytesOrString(b) } else { row[col] = val } @@ -290,7 +290,7 @@ func (s *DBQueryCachedStep) runQuery(ctx context.Context, pc *PipelineContext) ( } val := values[i] if b, ok := val.([]byte); ok { - output[col] = string(b) + output[col] = parseJSONBytesOrString(b) } else { output[col] = val } diff --git a/module/pipeline_step_db_query_test.go b/module/pipeline_step_db_query_test.go index e704faf4..21b43f34 100644 --- a/module/pipeline_step_db_query_test.go +++ b/module/pipeline_step_db_query_test.go @@ -370,3 +370,85 @@ func TestDBQueryStep_EmptyResult(t *testing.T) { t.Errorf("expected count=0, got %d", count) } } + +// TestParseJSONBytesOrString exercises the helper used by the db_query scanner +// to transparently parse PostgreSQL json/jsonb column bytes. 
+func TestParseJSONBytesOrString(t *testing.T) { + tests := []struct { + name string + input []byte + want any + }{ + { + name: "json object", + input: []byte(`{"id":1,"type":"follow-ups"}`), + want: map[string]any{"id": float64(1), "type": "follow-ups"}, + }, + { + name: "json array", + input: []byte(`[{"id":1},{"id":2}]`), + want: []any{map[string]any{"id": float64(1)}, map[string]any{"id": float64(2)}}, + }, + { + name: "json string", + input: []byte(`"hello"`), + want: "hello", + }, + { + name: "json number", + input: []byte(`42`), + want: float64(42), + }, + { + name: "json bool", + input: []byte(`true`), + want: true, + }, + { + name: "json null", + input: []byte(`null`), + want: nil, + }, + { + name: "binary / non-json bytes", + input: []byte{0x89, 0x50, 0x4e, 0x47}, // PNG magic bytes + want: string([]byte{0x89, 0x50, 0x4e, 0x47}), + }, + { + name: "empty bytes", + input: []byte{}, + want: "", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := parseJSONBytesOrString(tc.input) + // Maps are compared key-by-key and slices by length only (elements are not deep-compared); scalars use direct equality. + switch expected := tc.want.(type) { + case map[string]any: + m, ok := got.(map[string]any) + if !ok { + t.Fatalf("expected map[string]any, got %T", got) + } + for k, v := range expected { + if m[k] != v { + t.Errorf("key %q: expected %v, got %v", k, v, m[k]) + } + } + case []any: + sl, ok := got.([]any) + if !ok { + t.Fatalf("expected []any, got %T", got) + } + if len(sl) != len(expected) { + t.Fatalf("expected len %d, got %d", len(expected), len(sl)) + } + default: + if got != tc.want { + t.Errorf("expected %v (%T), got %v (%T)", tc.want, tc.want, got, got) + } + } + }) + } +} diff --git a/module/pipeline_step_json_parse.go b/module/pipeline_step_json_parse.go new file mode 100644 index 00000000..a06f7a5a --- /dev/null +++ b/module/pipeline_step_json_parse.go @@ -0,0 +1,82 @@ +package module + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/GoCodeAlone/modular" +) + +// JSONParseStep parses a JSON string value from the pipeline context into a +// structured Go value (map, slice, etc.) and stores the result as step output. +// +// This is useful when a pipeline step (e.g. step.db_query against a legacy +// driver, or step.http_call) returns a JSON column/field as a raw string rather +// than as a pre-parsed Go type. It is the explicit counterpart to the automatic +// json/jsonb detection that step.db_query performs for the pgx driver. +// +// Configuration: +// +// source: "steps.fetch.row.json_column" # dot-path to the JSON string value (required) +// target: "parsed_data" # output key name (optional, defaults to "value") +type JSONParseStep struct { + name string + source string + target string +} + +// NewJSONParseStepFactory returns a StepFactory that creates JSONParseStep instances.
+func NewJSONParseStepFactory() StepFactory { + return func(name string, config map[string]any, _ modular.Application) (PipelineStep, error) { + source, _ := config["source"].(string) + if source == "" { + return nil, fmt.Errorf("json_parse step %q: 'source' is required", name) + } + + target, _ := config["target"].(string) + if target == "" { + target = "value" + } + + return &JSONParseStep{ + name: name, + source: source, + target: target, + }, nil + } +} + +// Name returns the step name. +func (s *JSONParseStep) Name() string { return s.name } + +// Execute resolves the source path, parses the value as JSON if it is a string, +// and stores the result under the configured target key. +func (s *JSONParseStep) Execute(_ context.Context, pc *PipelineContext) (*StepResult, error) { + raw := resolveBodyFrom(s.source, pc) + if raw == nil { + return nil, fmt.Errorf("json_parse step %q: source %q not found or resolved to nil", s.name, s.source) + } + + var parsed any + switch v := raw.(type) { + case string: + if err := json.Unmarshal([]byte(v), &parsed); err != nil { + return nil, fmt.Errorf("json_parse step %q: failed to parse JSON from %q: %w", s.name, s.source, err) + } + case []byte: + if err := json.Unmarshal(v, &parsed); err != nil { + return nil, fmt.Errorf("json_parse step %q: failed to parse JSON bytes from %q: %w", s.name, s.source, err) + } + default: + // Value is already a structured type (map, slice, number, bool, nil). + // Pass it through unchanged so that pipelines are idempotent when the + // upstream step already returns a parsed value (e.g. after the db_query + // fix lands, json_parse is a no-op for json/jsonb columns). 
+ parsed = raw + } + + return &StepResult{Output: map[string]any{ + s.target: parsed, + }}, nil +} diff --git a/module/pipeline_step_json_parse_test.go b/module/pipeline_step_json_parse_test.go new file mode 100644 index 00000000..418c01a4 --- /dev/null +++ b/module/pipeline_step_json_parse_test.go @@ -0,0 +1,216 @@ +package module + +import ( + "context" + "testing" +) + +func TestJSONParseStep_StringJSON(t *testing.T) { + factory := NewJSONParseStepFactory() + step, err := factory("parse-json", map[string]any{ + "source": "steps.fetch.row.data", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{ + "data": `[{"id":1,"type":"follow-ups"}]`, + }, + }) + + result, err := step.Execute(context.Background(), pc) + if err != nil { + t.Fatalf("execute error: %v", err) + } + + parsed, ok := result.Output["value"].([]any) + if !ok { + t.Fatalf("expected []any, got %T: %v", result.Output["value"], result.Output["value"]) + } + if len(parsed) != 1 { + t.Fatalf("expected 1 element, got %d", len(parsed)) + } + obj, ok := parsed[0].(map[string]any) + if !ok { + t.Fatalf("expected map[string]any element, got %T", parsed[0]) + } + // JSON numbers unmarshal to float64 by default. 
+ if obj["id"] != float64(1) { + t.Errorf("expected id=1, got %v", obj["id"]) + } + if obj["type"] != "follow-ups" { + t.Errorf("expected type='follow-ups', got %v", obj["type"]) + } +} + +func TestJSONParseStep_StringJSONObject(t *testing.T) { + factory := NewJSONParseStepFactory() + step, err := factory("parse-obj", map[string]any{ + "source": "steps.fetch.row.meta", + "target": "parsed_meta", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{ + "meta": `{"total":42,"page":1}`, + }, + }) + + result, err := step.Execute(context.Background(), pc) + if err != nil { + t.Fatalf("execute error: %v", err) + } + + parsed, ok := result.Output["parsed_meta"].(map[string]any) + if !ok { + t.Fatalf("expected map[string]any, got %T", result.Output["parsed_meta"]) + } + if parsed["total"] != float64(42) { + t.Errorf("expected total=42, got %v", parsed["total"]) + } + if parsed["page"] != float64(1) { + t.Errorf("expected page=1, got %v", parsed["page"]) + } +} + +func TestJSONParseStep_ByteSliceJSON(t *testing.T) { + factory := NewJSONParseStepFactory() + step, err := factory("parse-bytes", map[string]any{ + "source": "steps.fetch.row.jsonb_col", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{ + "jsonb_col": []byte(`{"key":"value"}`), + }, + }) + + result, err := step.Execute(context.Background(), pc) + if err != nil { + t.Fatalf("execute error: %v", err) + } + + parsed, ok := result.Output["value"].(map[string]any) + if !ok { + t.Fatalf("expected map[string]any, got %T", result.Output["value"]) + } + if parsed["key"] != "value" { + t.Errorf("expected key='value', got %v", parsed["key"]) + } +} + +func TestJSONParseStep_AlreadyParsed(t *testing.T) { + // When the upstream step already returns a structured value 
(map/slice), + // json_parse should pass it through unchanged. + factory := NewJSONParseStepFactory() + step, err := factory("no-op-parse", map[string]any{ + "source": "steps.fetch.row.data", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + original := map[string]any{"id": 1, "name": "test"} + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{ + "data": original, + }, + }) + + result, err := step.Execute(context.Background(), pc) + if err != nil { + t.Fatalf("execute error: %v", err) + } + + parsed, ok := result.Output["value"].(map[string]any) + if !ok { + t.Fatalf("expected map[string]any, got %T", result.Output["value"]) + } + if parsed["name"] != "test" { + t.Errorf("expected name='test', got %v", parsed["name"]) + } +} + +func TestJSONParseStep_InvalidJSON(t *testing.T) { + factory := NewJSONParseStepFactory() + step, err := factory("bad-json", map[string]any{ + "source": "steps.fetch.row.data", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{ + "data": "not valid json {{{", + }, + }) + + _, err = step.Execute(context.Background(), pc) + if err == nil { + t.Fatal("expected error for invalid JSON") + } +} + +func TestJSONParseStep_MissingSource(t *testing.T) { + factory := NewJSONParseStepFactory() + _, err := factory("no-source", map[string]any{}, nil) + if err == nil { + t.Fatal("expected error for missing source") + } +} + +func TestJSONParseStep_UnresolvablePath(t *testing.T) { + // A typo in source should fail fast rather than silently producing nil. 
+ factory := NewJSONParseStepFactory() + step, err := factory("bad-path", map[string]any{ + "source": "steps.nonexistent.field", + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + _, err = step.Execute(context.Background(), pc) + if err == nil { + t.Fatal("expected error when source path resolves to nil") + } +} + +func TestJSONParseStep_DefaultTargetKey(t *testing.T) { + factory := NewJSONParseStepFactory() + step, err := factory("default-target", map[string]any{ + "source": "steps.fetch.row.data", + // no "target" config — should default to "value" + }, nil) + if err != nil { + t.Fatalf("factory error: %v", err) + } + + pc := NewPipelineContext(nil, nil) + pc.MergeStepOutput("fetch", map[string]any{ + "row": map[string]any{"data": `{"ok":true}`}, + }) + + result, err := step.Execute(context.Background(), pc) + if err != nil { + t.Fatalf("execute error: %v", err) + } + + if _, hasValue := result.Output["value"]; !hasValue { + t.Errorf("expected 'value' key in output, got keys: %v", result.Output) + } +} diff --git a/module/pipeline_step_resilience.go b/module/pipeline_step_resilience.go index 47659930..e578f4c1 100644 --- a/module/pipeline_step_resilience.go +++ b/module/pipeline_step_resilience.go @@ -276,6 +276,13 @@ func buildSubStep(parentName, field string, cfg map[string]any, registryFn func( stepName = fmt.Sprintf("%s-%s", parentName, field) } + // If a nested "config" key exists (matching engine's pipeline step YAML format), + // unwrap it so the factory receives the inner config directly. + if innerCfg, ok := cfg["config"].(map[string]any); ok { + return registry.Create(stepType, stepName, innerCfg, app) + } + + // Legacy/flat format: pass everything except type and name. 
subCfg := make(map[string]any, len(cfg)) for k, v := range cfg { if k != "type" && k != "name" { diff --git a/module/platform_do_database.go b/module/platform_do_database.go index 8b550f33..ea44c7a5 100644 --- a/module/platform_do_database.go +++ b/module/platform_do_database.go @@ -13,12 +13,12 @@ import ( type DODatabaseState struct { ID string `json:"id"` Name string `json:"name"` - Engine string `json:"engine"` // pg, mysql, redis, mongodb, kafka + Engine string `json:"engine"` // pg, mysql, redis, mongodb, kafka Version string `json:"version"` - Size string `json:"size"` // e.g. db-s-1vcpu-1gb + Size string `json:"size"` // e.g. db-s-1vcpu-1gb Region string `json:"region"` NumNodes int `json:"numNodes"` - Status string `json:"status"` // pending, online, resizing, migrating, error + Status string `json:"status"` // pending, online, resizing, migrating, error Host string `json:"host"` Port int `json:"port"` DatabaseName string `json:"databaseName"` diff --git a/module/platform_do_database_test.go b/module/platform_do_database_test.go index 52e36f98..b1a28dcd 100644 --- a/module/platform_do_database_test.go +++ b/module/platform_do_database_test.go @@ -6,13 +6,13 @@ func TestPlatformDODatabase_MockBackend(t *testing.T) { m := &PlatformDODatabase{ name: "test-db", config: map[string]any{ - "provider": "mock", - "engine": "pg", - "version": "16", - "size": "db-s-1vcpu-1gb", - "region": "nyc1", + "provider": "mock", + "engine": "pg", + "version": "16", + "size": "db-s-1vcpu-1gb", + "region": "nyc1", "num_nodes": 1, - "name": "test-db", + "name": "test-db", }, state: &DODatabaseState{ Name: "test-db", diff --git a/module/scan_provider_test.go b/module/scan_provider_test.go index d6b83439..187e6d86 100644 --- a/module/scan_provider_test.go +++ b/module/scan_provider_test.go @@ -58,7 +58,7 @@ func (a *scanMockApp) GetService(name string, target any) error { return nil } -func (a *scanMockApp) RegisterService(name string, svc any) error { a.services[name] = svc; return 
nil } +func (a *scanMockApp) RegisterService(name string, svc any) error { a.services[name] = svc; return nil } func (a *scanMockApp) RegisterConfigSection(string, modular.ConfigProvider) {} func (a *scanMockApp) GetConfigSection(string) (modular.ConfigProvider, error) { return nil, nil @@ -67,7 +67,7 @@ func (a *scanMockApp) ConfigSections() map[string]modular.ConfigProvider { retur func (a *scanMockApp) Logger() modular.Logger { return nil } func (a *scanMockApp) SetLogger(modular.Logger) {} func (a *scanMockApp) ConfigProvider() modular.ConfigProvider { return nil } -func (a *scanMockApp) SvcRegistry() modular.ServiceRegistry { return a.services } +func (a *scanMockApp) SvcRegistry() modular.ServiceRegistry { return a.services } func (a *scanMockApp) RegisterModule(modular.Module) {} func (a *scanMockApp) Init() error { return nil } func (a *scanMockApp) Start() error { return nil } @@ -83,9 +83,9 @@ func (a *scanMockApp) GetServiceEntry(string) (*modular.ServiceRegistryEntry, bo func (a *scanMockApp) GetServicesByInterface(_ reflect.Type) []*modular.ServiceRegistryEntry { return nil } -func (a *scanMockApp) GetModule(string) modular.Module { return nil } -func (a *scanMockApp) GetAllModules() map[string]modular.Module { return nil } -func (a *scanMockApp) StartTime() time.Time { return time.Time{} } +func (a *scanMockApp) GetModule(string) modular.Module { return nil } +func (a *scanMockApp) GetAllModules() map[string]modular.Module { return nil } +func (a *scanMockApp) StartTime() time.Time { return time.Time{} } func (a *scanMockApp) OnConfigLoaded(func(modular.Application) error) {} func newScanApp(provider SecurityScannerProvider) *scanMockApp { diff --git a/plugins/auth/plugin.go b/plugins/auth/plugin.go index 10d8b0ba..fe2b1387 100644 --- a/plugins/auth/plugin.go +++ b/plugins/auth/plugin.go @@ -1,6 +1,7 @@ package auth import ( + "fmt" "log" "time" @@ -214,6 +215,48 @@ func (p *Plugin) ModuleFactories() map[string]plugin.ModuleFactory { requiredClaimVal 
:= stringFromMap(introspectCfg, "requiredClaimVal") m.SetIntrospectPolicy(allowOthers, requiredScope, requiredClaim, requiredClaimVal) } + + // Register YAML-configured trusted keys for JWT-bearer grants. + if trustedKeys, ok := cfg["trustedKeys"].([]any); ok { + for i, tk := range trustedKeys { + tkMap, ok := tk.(map[string]any) + if !ok { + m.SetInitErr(fmt.Errorf("auth.m2m: trustedKeys[%d] must be an object", i)) + break + } + issuer := stringFromMap(tkMap, "issuer") + publicKeyPEM := stringFromMap(tkMap, "publicKeyPEM") + if issuer == "" { + m.SetInitErr(fmt.Errorf("auth.m2m: trustedKeys[%d] missing required field \"issuer\"", i)) + break + } + if publicKeyPEM == "" { + m.SetInitErr(fmt.Errorf("auth.m2m: trustedKeys[%d] (issuer %q) missing required field \"publicKeyPEM\"", i, issuer)) + break + } + var audiences []string + if auds, ok := tkMap["audiences"].([]any); ok { + for _, a := range auds { + if s, ok := a.(string); ok { + audiences = append(audiences, s) + } + } + } + var claimMapping map[string]string + if cm, ok := tkMap["claimMapping"].(map[string]any); ok { + claimMapping = make(map[string]string, len(cm)) + for k, v := range cm { + if s, ok := v.(string); ok { + claimMapping[k] = s + } + } + } + if err := m.AddTrustedKeyFromPEM(issuer, publicKeyPEM, audiences, claimMapping); err != nil { + m.SetInitErr(err) + break + } + } + } return m }, } @@ -380,6 +423,7 @@ func (p *Plugin) ModuleSchemas() []*schema.ModuleSchema { {Key: "issuer", Label: "Issuer", Type: schema.FieldTypeString, DefaultValue: "workflow", Description: "Token issuer (iss) claim", Placeholder: "workflow"}, {Key: "clients", Label: "Registered Clients", Type: schema.FieldTypeJSON, Description: "List of OAuth2 clients: [{clientId, clientSecret, scopes, description, claims}]"}, {Key: "introspect", Label: "Introspection Policy", Type: schema.FieldTypeJSON, Description: "Access-control policy for POST /oauth/introspect: {allowOthers: bool, requiredScope: string, requiredClaim: string, 
requiredClaimVal: string}. Default: self-only (allowOthers: false)."}, + {Key: "trustedKeys", Label: "Trusted External Issuers", Type: schema.FieldTypeJSON, Description: "List of trusted external JWT issuers for JWT-bearer grants: [{issuer, publicKeyPEM, audiences, claimMapping}]. Supports literal \\n in PEM values for Docker/Kubernetes env vars."}, }, DefaultConfig: map[string]any{"algorithm": "ES256", "tokenExpiry": "1h", "issuer": "workflow", "clients": []any{}}, }, diff --git a/plugins/auth/plugin_test.go b/plugins/auth/plugin_test.go index 441cf771..bca73c84 100644 --- a/plugins/auth/plugin_test.go +++ b/plugins/auth/plugin_test.go @@ -1,14 +1,21 @@ package auth import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" "net/http" "net/http/httptest" "net/url" "strings" "testing" + "time" "github.com/GoCodeAlone/workflow/module" "github.com/GoCodeAlone/workflow/plugin" + "github.com/golang-jwt/jwt/v5" ) func TestPluginImplementsEnginePlugin(t *testing.T) { @@ -168,3 +175,122 @@ func TestModuleFactoryM2MWithClaims(t *testing.T) { t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) } } + +func TestModuleFactoryM2MWithTrustedKeys(t *testing.T) { + // Generate a key pair to represent an external trusted issuer. 
+ clientKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + pkixBytes, err := x509.MarshalPKIXPublicKey(&clientKey.PublicKey) + if err != nil { + t.Fatalf("MarshalPKIXPublicKey: %v", err) + } + pubKeyPEM := string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pkixBytes})) + + p := New() + factories := p.ModuleFactories() + + mod := factories["auth.m2m"]("m2m-test", map[string]any{ + "algorithm": "ES256", + "trustedKeys": []any{ + map[string]any{ + "issuer": "https://external-issuer.example.com", + "publicKeyPEM": pubKeyPEM, + "audiences": []any{"test-audience"}, + "claimMapping": map[string]any{ + "user_id": "ext_user", + }, + }, + }, + }) + if mod == nil { + t.Fatal("auth.m2m factory returned nil") + } + + m2mMod, ok := mod.(*module.M2MAuthModule) + if !ok { + t.Fatal("expected *module.M2MAuthModule") + } + + // Issue a JWT assertion signed by the external issuer's key. + claims := jwt.MapClaims{ + "iss": "https://external-issuer.example.com", + "sub": "external-service", + "aud": "test-audience", + "iat": time.Now().Unix(), + "exp": time.Now().Add(5 * time.Minute).Unix(), + } + tok := jwt.NewWithClaims(jwt.SigningMethodES256, claims) + assertion, err := tok.SignedString(clientKey) + if err != nil { + t.Fatalf("sign assertion: %v", err) + } + + params := url.Values{ + "grant_type": {module.GrantTypeJWTBearer}, + "assertion": {assertion}, + } + req := httptest.NewRequest("POST", "/oauth/token", strings.NewReader(params.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + w := httptest.NewRecorder() + m2mMod.Handle(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("expected 200 for JWT-bearer with trusted key, got %d: %s", w.Code, w.Body.String()) + } +} + +func TestModuleFactoryM2MWithTrustedKeys_MissingIssuer(t *testing.T) { + p := New() + factories := p.ModuleFactories() + + mod := factories["auth.m2m"]("m2m-test", map[string]any{ + "algorithm": "ES256", 
+ "trustedKeys": []any{ + map[string]any{ + // issuer is missing + "publicKeyPEM": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEtest==\n-----END PUBLIC KEY-----", + }, + }, + }) + if mod == nil { + t.Fatal("auth.m2m factory returned nil") + } + m2mMod, ok := mod.(*module.M2MAuthModule) + if !ok { + t.Fatal("expected *module.M2MAuthModule") + } + + // Init should fail because trustedKeys[0] is missing issuer. + if err := m2mMod.Init(nil); err == nil { + t.Error("expected Init to return error for trustedKeys entry missing issuer") + } +} + +func TestModuleFactoryM2MWithTrustedKeys_MissingPEM(t *testing.T) { + p := New() + factories := p.ModuleFactories() + + mod := factories["auth.m2m"]("m2m-test", map[string]any{ + "algorithm": "ES256", + "trustedKeys": []any{ + map[string]any{ + "issuer": "https://external.example.com", + // publicKeyPEM is missing + }, + }, + }) + if mod == nil { + t.Fatal("auth.m2m factory returned nil") + } + m2mMod, ok := mod.(*module.M2MAuthModule) + if !ok { + t.Fatal("expected *module.M2MAuthModule") + } + + // Init should fail because trustedKeys[0] is missing publicKeyPEM. 
+ if err := m2mMod.Init(nil); err == nil { + t.Error("expected Init to return error for trustedKeys entry missing publicKeyPEM") + } +} diff --git a/plugins/http/modules.go b/plugins/http/modules.go index 84be5730..239f62fc 100644 --- a/plugins/http/modules.go +++ b/plugins/http/modules.go @@ -173,25 +173,43 @@ func rateLimitMiddlewareFactory(name string, cfg map[string]any) modular.Module } func corsMiddlewareFactory(name string, cfg map[string]any) modular.Module { - allowedOrigins := []string{"*"} - allowedMethods := []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"} + corsCfg := module.CORSMiddlewareConfig{ + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, + } if origins, ok := cfg["allowedOrigins"].([]any); ok { - allowedOrigins = make([]string, len(origins)) + corsCfg.AllowedOrigins = make([]string, len(origins)) for i, origin := range origins { if str, ok := origin.(string); ok { - allowedOrigins[i] = str + corsCfg.AllowedOrigins[i] = str } } } if methods, ok := cfg["allowedMethods"].([]any); ok { - allowedMethods = make([]string, len(methods)) + corsCfg.AllowedMethods = make([]string, len(methods)) for i, method := range methods { if str, ok := method.(string); ok { - allowedMethods[i] = str + corsCfg.AllowedMethods[i] = str + } + } + } + if headers, ok := cfg["allowedHeaders"].([]any); ok { + corsCfg.AllowedHeaders = make([]string, len(headers)) + for i, header := range headers { + if str, ok := header.(string); ok { + corsCfg.AllowedHeaders[i] = str } } } - return module.NewCORSMiddleware(name, allowedOrigins, allowedMethods) + if allowCreds, ok := cfg["allowCredentials"].(bool); ok { + corsCfg.AllowCredentials = allowCreds + } + if maxAge, ok := cfg["maxAge"].(int); ok { + corsCfg.MaxAge = maxAge + } else if maxAgeFloat, ok := cfg["maxAge"].(float64); ok { + corsCfg.MaxAge = int(maxAgeFloat) + } + return module.NewCORSMiddlewareWithConfig(name, corsCfg) } func requestIDMiddlewareFactory(name string, 
_ map[string]any) modular.Module { diff --git a/plugins/http/schemas.go b/plugins/http/schemas.go index 73212cd3..000e6650 100644 --- a/plugins/http/schemas.go +++ b/plugins/http/schemas.go @@ -176,12 +176,18 @@ func corsMiddlewareSchema() *schema.ModuleSchema { Inputs: []schema.ServiceIODef{{Name: "request", Type: "http.Request", Description: "HTTP request needing CORS headers"}}, Outputs: []schema.ServiceIODef{{Name: "cors", Type: "http.Request", Description: "HTTP request with CORS headers applied"}}, ConfigFields: []schema.ConfigFieldDef{ - {Key: "allowedOrigins", Label: "Allowed Origins", Type: schema.FieldTypeArray, ArrayItemType: "string", DefaultValue: []string{"*"}, Description: "Allowed origins (e.g. https://example.com, http://localhost:3000)"}, + {Key: "allowedOrigins", Label: "Allowed Origins", Type: schema.FieldTypeArray, ArrayItemType: "string", DefaultValue: []string{"*"}, Description: "Allowed origins (e.g. https://example.com, http://localhost:3000, *.example.com)"}, {Key: "allowedMethods", Label: "Allowed Methods", Type: schema.FieldTypeArray, ArrayItemType: "string", DefaultValue: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, Description: "Allowed HTTP methods"}, + {Key: "allowedHeaders", Label: "Allowed Headers", Type: schema.FieldTypeArray, ArrayItemType: "string", DefaultValue: []string{"Content-Type", "Authorization"}, Description: "Allowed request headers (e.g. Authorization, X-CSRF-Token, X-Request-Id)"}, + {Key: "allowCredentials", Label: "Allow Credentials", Type: schema.FieldTypeBool, DefaultValue: false, Description: "Whether to allow requests with credentials (cookies, authorization headers). When true, the actual Origin is reflected instead of *"}, + {Key: "maxAge", Label: "Max Age (sec)", Type: schema.FieldTypeNumber, DefaultValue: 0, Description: "How long (in seconds) the preflight response may be cached. 
0 means no caching directive is sent"}, }, DefaultConfig: map[string]any{ - "allowedOrigins": []string{"*"}, - "allowedMethods": []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, + "allowedOrigins": []string{"*"}, + "allowedMethods": []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, + "allowedHeaders": []string{"Content-Type", "Authorization"}, + "allowCredentials": false, + "maxAge": 0, }, } } diff --git a/plugins/pipelinesteps/plugin.go b/plugins/pipelinesteps/plugin.go index 8c0b38f0..2d80b8b0 100644 --- a/plugins/pipelinesteps/plugin.go +++ b/plugins/pipelinesteps/plugin.go @@ -1,9 +1,9 @@ // Package pipelinesteps provides a plugin that registers generic pipeline step // types: validate, transform, conditional, set, log, delegate, jq, publish, // http_call, http_proxy, request_parse, db_query, db_exec, db_query_cached, json_response, -// raw_response, static_file, validate_path_param, validate_pagination, validate_request_body, -// foreach, webhook_verify, base64_decode, ui_scaffold, ui_scaffold_analyze, -// dlq_send, dlq_replay, retry_with_backoff, circuit_breaker (wrapping), +// raw_response, json_parse, static_file, validate_path_param, validate_pagination, +// validate_request_body, foreach, webhook_verify, base64_decode, ui_scaffold, +// ui_scaffold_analyze, dlq_send, dlq_replay, retry_with_backoff, circuit_breaker (wrapping), // s3_upload, auth_validate, authz_check, token_revoke, sandbox_exec. // It also provides the PipelineWorkflowHandler for composable pipelines. 
package pipelinesteps @@ -70,6 +70,7 @@ func New() *Plugin { "step.db_sync_partitions", "step.json_response", "step.raw_response", + "step.json_parse", "step.static_file", "step.workflow_call", "step.validate_path_param", @@ -141,6 +142,7 @@ func (p *Plugin) StepFactories() map[string]plugin.StepFactory { "step.db_sync_partitions": wrapStepFactory(module.NewDBSyncPartitionsStepFactory()), "step.json_response": wrapStepFactory(module.NewJSONResponseStepFactory()), "step.raw_response": wrapStepFactory(module.NewRawResponseStepFactory()), + "step.json_parse": wrapStepFactory(module.NewJSONParseStepFactory()), "step.static_file": wrapStepFactory(module.NewStaticFileStepFactory()), "step.validate_path_param": wrapStepFactory(module.NewValidatePathParamStepFactory()), "step.validate_pagination": wrapStepFactory(module.NewValidatePaginationStepFactory()), diff --git a/plugins/pipelinesteps/plugin_test.go b/plugins/pipelinesteps/plugin_test.go index 052462e2..4f26339c 100644 --- a/plugins/pipelinesteps/plugin_test.go +++ b/plugins/pipelinesteps/plugin_test.go @@ -49,6 +49,7 @@ func TestStepFactories(t *testing.T) { "step.db_sync_partitions", "step.json_response", "step.raw_response", + "step.json_parse", "step.static_file", "step.validate_path_param", "step.validate_pagination", diff --git a/plugins/storage/plugin.go b/plugins/storage/plugin.go index 5a40f448..350920ce 100644 --- a/plugins/storage/plugin.go +++ b/plugins/storage/plugin.go @@ -186,6 +186,12 @@ func (p *Plugin) ModuleFactories() map[string]plugin.ModuleFactory { if sc, ok := cfg["sourceColumn"].(string); ok { partCfg.SourceColumn = sc } + if autoSync, ok := cfg["autoSync"].(bool); ok { + partCfg.AutoSync = &autoSync + } + if syncInterval, ok := cfg["syncInterval"].(string); ok { + partCfg.SyncInterval = syncInterval + } if partitions, ok := cfg["partitions"].([]any); ok { for _, item := range partitions { pMap, ok := item.(map[string]any) @@ -399,10 +405,12 @@ func (p *Plugin) ModuleSchemas() 
[]*schema.ModuleSchema { {Key: "partitionNameFormat", Label: "Partition Name Format", Type: schema.FieldTypeString, DefaultValue: "{table}_{tenant}", Description: "Template for partition table names. Supports {table} and {tenant} placeholders.", Placeholder: "{table}_{tenant}"}, {Key: "sourceTable", Label: "Source Table", Type: schema.FieldTypeString, Description: "Table containing all tenant IDs for auto-partition sync (e.g. tenants)", Placeholder: "tenants"}, {Key: "sourceColumn", Label: "Source Column", Type: schema.FieldTypeString, Description: "Column in source table to query for tenant values. Defaults to partitionKey.", Placeholder: "id"}, + {Key: "autoSync", Label: "Auto Sync", Type: schema.FieldTypeBool, DefaultValue: true, Description: "Automatically sync partitions from sourceTable on startup. Defaults to true when sourceTable is set."}, + {Key: "syncInterval", Label: "Sync Interval", Type: schema.FieldTypeDuration, Description: "Interval for periodic partition re-sync from sourceTable (e.g. 60s, 5m). Leave empty to disable.", Placeholder: "60s"}, {Key: "maxOpenConns", Label: "Max Open Connections", Type: schema.FieldTypeNumber, DefaultValue: 25, Description: "Maximum number of open database connections"}, {Key: "maxIdleConns", Label: "Max Idle Connections", Type: schema.FieldTypeNumber, DefaultValue: 5, Description: "Maximum number of idle connections in the pool"}, }, - DefaultConfig: map[string]any{"maxOpenConns": 25, "maxIdleConns": 5, "partitionType": "list", "partitionNameFormat": "{table}_{tenant}"}, + DefaultConfig: map[string]any{"maxOpenConns": 25, "maxIdleConns": 5, "partitionType": "list", "partitionNameFormat": "{table}_{tenant}", "autoSync": true}, }, { Type: "persistence.store",