diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index eef415a3..6cbc8fb9 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -2,7 +2,7 @@ ## Overview -The Workflow Engine is a configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 50+ built-in module types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. +The Workflow Engine is a configuration-driven orchestration platform built in Go. It turns YAML configuration files into running applications with no code changes required. The engine provides 90+ module types and 130+ pipeline step types, a visual workflow builder UI, a multi-tenant admin platform, AI-assisted configuration generation, and dynamic hot-reload of Go components at runtime. ## Core Engine @@ -19,70 +19,96 @@ The engine is built on the [CrisisTextLine/modular](https://github.com/CrisisTex - `cmd/server` -- runs workflow configs as a server process - `cmd/wfctl` -- validates and inspects workflow configs offline -## Module Types (50+) +## Module Types (90+) -All modules are registered in `engine.go` and instantiated from YAML config. Organized by category: +All modules are instantiated from YAML config via the plugin factory registry. Organized by category. Each module type is provided by a plugin (see **Plugin** column); all are included when using `plugins/all`. + +> **Plugin tiers:** *Core* plugins are loaded by default. Plugin column shows which plugin package registers the type. 
### HTTP & Routing -| Type | Description | -|------|-------------| -| `http.server` | Configurable web server | -| `http.router` | Request routing with path and method matching | -| `http.handler` | HTTP request processing with configurable responses | -| `http.proxy` | HTTP reverse proxy | -| `http.simple_proxy` | Simplified proxy configuration | -| `httpserver.modular` | Modular framework HTTP server integration | -| `httpclient.modular` | Modular framework HTTP client | -| `chimux.router` | Chi mux-based router | -| `reverseproxy` | Modular framework reverse proxy (v2) | -| `static.fileserver` | Static file serving | +| Type | Description | Plugin | +|------|-------------|--------| +| `http.server` | Configurable web server | http | +| `http.router` | Request routing with path and method matching | http | +| `http.handler` | HTTP request processing with configurable responses | http | +| `http.proxy` | HTTP reverse proxy | http | +| `http.simple_proxy` | Simplified proxy configuration | http | +| `reverseproxy` | Modular framework reverse proxy (v2) | http | +| `static.fileserver` | Static file serving | http | +| `openapi` | OpenAPI v3 spec-driven HTTP route generation with request validation and Swagger UI | openapi | + +> `httpserver.modular`, `httpclient.modular`, and `chimux.router` were removed in favor of `http.server`, `http.router`, and `reverseproxy`. 
### Middleware -| Type | Description | -|------|-------------| -| `http.middleware.auth` | Authentication middleware | -| `http.middleware.cors` | CORS header management | -| `http.middleware.logging` | Request/response logging | -| `http.middleware.ratelimit` | Rate limiting | -| `http.middleware.requestid` | Request ID injection | -| `http.middleware.securityheaders` | Security header injection | +| Type | Description | Plugin | +|------|-------------|--------| +| `http.middleware.auth` | Authentication middleware | http | +| `http.middleware.cors` | CORS header management | http | +| `http.middleware.logging` | Request/response logging | http | +| `http.middleware.ratelimit` | Rate limiting | http | +| `http.middleware.requestid` | Request ID injection | http | +| `http.middleware.securityheaders` | Security header injection | http | +| `http.middleware.otel` | OpenTelemetry request tracing middleware | observability | ### Authentication -| Type | Description | -|------|-------------| -| `auth.jwt` | JWT authentication with seed users, persistence, token refresh | -| `auth.modular` | Modular framework auth integration | -| `auth.user-store` | User storage backend | +| Type | Description | Plugin | +|------|-------------|--------| +| `auth.jwt` | JWT authentication with seed users, persistence, token refresh | auth | +| `auth.user-store` | User storage backend | auth | +| `auth.oauth2` | OAuth2 authorization code flow (Google, GitHub, generic OIDC) | auth | +| `auth.m2m` | Machine-to-machine OAuth2: client_credentials grant, JWT-bearer, ES256/HS256, JWKS endpoint | auth | +| `auth.token-blacklist` | Token revocation blacklist backed by SQLite or in-memory store | auth | +| `security.field-protection` | Field-level encryption/decryption for sensitive data fields | auth | + +> `auth.modular` was removed in favor of `auth.jwt`. 
### API & CQRS -| Type | Description | -|------|-------------| -| `api.handler` | Generic REST resource handler | -| `api.command` | CQRS command handler with route pipelines | -| `api.query` | CQRS query handler with route pipelines | +| Type | Description | Plugin | +|------|-------------|--------| +| `api.handler` | Generic REST resource handler | api | +| `api.command` | CQRS command handler with route pipelines | api | +| `api.query` | CQRS query handler with route pipelines | api | +| `api.gateway` | Composable API gateway: routing, auth, rate limiting, CORS, and reverse proxying | api | +| `workflow.registry` | SQLite-backed registry for companies, organizations, projects, and workflows | api | +| `data.transformer` | Data transformation between formats using configurable pipelines | api | +| `processing.step` | Executes a component as a processing step in a workflow, with retry and compensation | api | ### State Machine -| Type | Description | -|------|-------------| -| `statemachine.engine` | State definitions, transitions, hooks, auto-transitions | -| `state.tracker` | State observation and tracking | -| `state.connector` | State machine interconnection | +| Type | Description | Plugin | +|------|-------------|--------| +| `statemachine.engine` | State definitions, transitions, hooks, auto-transitions | statemachine | +| `state.tracker` | State observation and tracking | statemachine | +| `state.connector` | State machine interconnection | statemachine | ### Messaging -| Type | Description | -|------|-------------| -| `messaging.broker` | In-memory message broker | -| `messaging.broker.eventbus` | EventBus-backed message broker | -| `messaging.handler` | Message processing handler | -| `messaging.kafka` | Apache Kafka broker integration | -| `messaging.nats` | NATS broker integration | +| Type | Description | Plugin | +|------|-------------|--------| +| `messaging.broker` | In-memory message broker | messaging | +| `messaging.broker.eventbus` | 
EventBus-backed message broker | messaging | +| `messaging.handler` | Message processing handler | messaging | +| `messaging.kafka` | Apache Kafka broker integration | messaging | +| `messaging.nats` | NATS broker integration | messaging | +| `notification.slack` | Slack notification sender | messaging | +| `webhook.sender` | Outbound webhook delivery with retry and dead letter | messaging | + +> `eventbus.modular` was removed in favor of `messaging.broker.eventbus`. ### Database & Persistence -| Type | Description | -|------|-------------| -| `database.modular` | Modular framework database integration | -| `database.workflow` | Workflow-specific database (SQLite + PostgreSQL) | -| `persistence.store` | Write-through persistence (SQLite/PostgreSQL) | +| Type | Description | Plugin | +|------|-------------|--------| +| `database.workflow` | Workflow-specific database (SQLite + PostgreSQL) | storage | +| `persistence.store` | Write-through persistence (SQLite/PostgreSQL) | storage | +| `database.partitioned` | PostgreSQL partitioned database for multi-tenant data isolation (LIST/RANGE partitions) | storage | + +> `database.modular` was removed in favor of `database.workflow`. 
+ +### NoSQL / Datastores +| Type | Description | Plugin | +|------|-------------|--------| +| `nosql.memory` | In-memory key-value NoSQL store for development and testing | datastores | +| `nosql.dynamodb` | AWS DynamoDB NoSQL store | datastores | +| `nosql.mongodb` | MongoDB document store | datastores | +| `nosql.redis` | Redis key-value store | datastores | ### Pipeline Steps @@ -107,71 +133,179 @@ flowchart TD style I fill:#e8f5e9,stroke:#388e3c ``` -| Type | Description | -|------|-------------| -| `processing.step` | Configurable processing step | -| `step.validate` | Validates pipeline data against required fields or JSON schema | -| `step.transform` | Transforms data shape and field mapping | -| `step.conditional` | Conditional branching based on field values | -| `step.set` | Sets values in pipeline context with template support | -| `step.log` | Logs pipeline data for debugging | -| `step.publish` | Publishes events to EventBus | -| `step.event_publish` | Publishes events to EventBus with full envelope control | -| `step.http_call` | Makes outbound HTTP requests | -| `step.delegate` | Delegates to a named service | -| `step.request_parse` | Extracts path params, query params, and request body from HTTP requests | -| `step.db_query` | Executes parameterized SQL SELECT queries against a named database | -| `step.db_exec` | Executes parameterized SQL INSERT/UPDATE/DELETE against a named database | -| `step.db_query_cached` | Executes a cached SQL SELECT query | -| `step.db_create_partition` | Creates a time-based table partition | -| `step.db_sync_partitions` | Ensures future partitions exist for a partitioned table | -| `step.json_response` | Writes HTTP JSON response with custom status code and headers | -| `step.raw_response` | Writes a raw HTTP response with arbitrary content type | -| `step.json_parse` | Parses a JSON string (or `[]byte`) in the pipeline context into a structured object | -| `step.static_file` | Serves a pre-loaded file from disk as an 
HTTP response | -| `step.workflow_call` | Invokes another workflow pipeline by name | -| `step.validate_path_param` | Validates a URL path parameter against a set of rules | -| `step.validate_pagination` | Validates and normalizes pagination query params | -| `step.validate_request_body` | Validates request body against a JSON schema | -| `step.foreach` | Iterates over a slice and runs sub-steps per element. Optional `concurrency: N` for parallel processing | -| `step.parallel` | Executes named sub-steps concurrently and collects results. O(max(branch)) time | -| `step.webhook_verify` | Verifies an inbound webhook signature | -| `step.base64_decode` | Decodes a base64-encoded field | -| `step.cache_get` | Reads a value from the cache module | -| `step.cache_set` | Writes a value to the cache module | -| `step.cache_delete` | Deletes a value from the cache module | -| `step.ui_scaffold` | Generates UI scaffolding from a workflow config | -| `step.ui_scaffold_analyze` | Analyzes UI scaffold state for a workflow | -| `step.dlq_send` | Sends a message to the dead-letter queue | -| `step.dlq_replay` | Replays messages from the dead-letter queue | -| `step.retry_with_backoff` | Retries a sub-pipeline with exponential backoff | -| `step.resilient_circuit_breaker` | Wraps a sub-pipeline with a circuit breaker | -| `step.s3_upload` | Uploads a file or data to an S3-compatible bucket | -| `step.auth_validate` | Validates an authentication token and populates claims | -| `step.token_revoke` | Revokes an auth token | -| `step.field_reencrypt` | Re-encrypts a field with a new key | -| `step.sandbox_exec` | Executes a command inside a sandboxed container | -| `step.http_proxy` | Proxies an HTTP request to an upstream service | -| `step.hash` | Computes a cryptographic hash (md5/sha256/sha512) of a template-resolved input | -| `step.regex_match` | Matches a regular expression against a template-resolved input | -| `step.jq` | Applies a JQ expression to pipeline data for complex 
transformations | -| `step.ai_complete` | AI text completion using a configured provider | -| `step.ai_classify` | AI text classification into named categories | -| `step.ai_extract` | AI structured data extraction using tool use or prompt-based parsing | -| `step.actor_send` | Sends a fire-and-forget message to an actor pool (Tell) | -| `step.actor_ask` | Sends a request-response message to an actor and returns the response (Ask) | +| Type | Description | Plugin | +|------|-------------|--------| +| `processing.step` | Configurable processing step | api | +| `step.validate` | Validates pipeline data against required fields or JSON schema | pipelinesteps | +| `step.transform` | Transforms data shape and field mapping | pipelinesteps | +| `step.conditional` | Conditional branching based on field values | pipelinesteps | +| `step.set` | Sets values in pipeline context with template support | pipelinesteps | +| `step.log` | Logs pipeline data for debugging | pipelinesteps | +| `step.publish` | Publishes events to EventBus | pipelinesteps | +| `step.event_publish` | Publishes events to EventBus with full envelope control | pipelinesteps | +| `step.http_call` | Makes outbound HTTP requests | pipelinesteps | +| `step.delegate` | Delegates to a named service | pipelinesteps | +| `step.request_parse` | Extracts path params, query params, and request body from HTTP requests | pipelinesteps | +| `step.db_query` | Executes parameterized SQL SELECT queries against a named database | pipelinesteps | +| `step.db_exec` | Executes parameterized SQL INSERT/UPDATE/DELETE against a named database | pipelinesteps | +| `step.db_query_cached` | Executes a cached SQL SELECT query | pipelinesteps | +| `step.db_create_partition` | Creates a time-based table partition | pipelinesteps | +| `step.db_sync_partitions` | Ensures future partitions exist for a partitioned table | pipelinesteps | +| `step.json_response` | Writes HTTP JSON response with custom status code and headers | pipelinesteps 
| +| `step.raw_response` | Writes a raw HTTP response with arbitrary content type | pipelinesteps | +| `step.json_parse` | Parses a JSON string (or `[]byte`) in the pipeline context into a structured object | pipelinesteps | +| `step.static_file` | Serves a pre-loaded file from disk as an HTTP response | pipelinesteps | +| `step.workflow_call` | Invokes another workflow pipeline by name | pipelinesteps | +| `step.sub_workflow` | Executes a named sub-workflow inline and merges its output | ai | +| `step.validate_path_param` | Validates a URL path parameter against a set of rules | pipelinesteps | +| `step.validate_pagination` | Validates and normalizes pagination query params | pipelinesteps | +| `step.validate_request_body` | Validates request body against a JSON schema | pipelinesteps | +| `step.foreach` | Iterates over a slice and runs sub-steps per element. Optional `concurrency: N` for parallel processing | pipelinesteps | +| `step.parallel` | Executes named sub-steps concurrently and collects results. 
O(max(branch)) time | pipelinesteps | +| `step.webhook_verify` | Verifies an inbound webhook signature | pipelinesteps | +| `step.base64_decode` | Decodes a base64-encoded field | pipelinesteps | +| `step.cache_get` | Reads a value from the cache module | pipelinesteps | +| `step.cache_set` | Writes a value to the cache module | pipelinesteps | +| `step.cache_delete` | Deletes a value from the cache module | pipelinesteps | +| `step.ui_scaffold` | Generates UI scaffolding from a workflow config | pipelinesteps | +| `step.ui_scaffold_analyze` | Analyzes UI scaffold state for a workflow | pipelinesteps | +| `step.dlq_send` | Sends a message to the dead-letter queue | pipelinesteps | +| `step.dlq_replay` | Replays messages from the dead-letter queue | pipelinesteps | +| `step.retry_with_backoff` | Retries a sub-pipeline with exponential backoff | pipelinesteps | +| `step.resilient_circuit_breaker` | Wraps a sub-pipeline with a circuit breaker | pipelinesteps | +| `step.s3_upload` | Uploads a file or data to an S3-compatible bucket | pipelinesteps | +| `step.auth_validate` | Validates an authentication token and populates claims | pipelinesteps | +| `step.token_revoke` | Revokes an auth token | pipelinesteps | +| `step.field_reencrypt` | Re-encrypts a field with a new key | pipelinesteps | +| `step.sandbox_exec` | Executes a command inside a sandboxed container | pipelinesteps | +| `step.http_proxy` | Proxies an HTTP request to an upstream service | pipelinesteps | +| `step.hash` | Computes a cryptographic hash (md5/sha256/sha512) of a template-resolved input | pipelinesteps | +| `step.regex_match` | Matches a regular expression against a template-resolved input | pipelinesteps | +| `step.jq` | Applies a JQ expression to pipeline data for complex transformations | pipelinesteps | +| `step.ai_complete` | AI text completion using a configured provider | ai | +| `step.ai_classify` | AI text classification into named categories | ai | +| `step.ai_extract` | AI structured 
data extraction using tool use or prompt-based parsing | ai | +| `step.actor_send` | Sends a fire-and-forget message to an actor pool (Tell) | actors | +| `step.actor_ask` | Sends a request-response message to an actor and returns the response (Ask) | actors | +| `step.rate_limit` | Applies per-client or global rate limiting to a pipeline step | http | +| `step.circuit_breaker` | Wraps a sub-pipeline with a circuit breaker (open/half-open/closed) | http | +| `step.feature_flag` | Evaluates a feature flag and branches based on the result | featureflags | +| `step.ff_gate` | Blocks execution unless a named feature flag is enabled | featureflags | +| `step.authz_check` | Evaluates an authorization policy (OPA, Casbin, or mock) for the current request | policy | +| `step.cli_invoke` | Invokes a registered CLI command by name | scheduler | +| `step.cli_print` | Prints output to stdout (used in CLI workflows) | scheduler | +| `step.statemachine_transition` | Triggers a state machine transition for the given entity | statemachine | +| `step.statemachine_get` | Retrieves the current state and metadata for a state machine entity | statemachine | +| `step.nosql_get` | Reads a document from a NoSQL store by key | datastores | +| `step.nosql_put` | Writes a document to a NoSQL store | datastores | +| `step.nosql_delete` | Deletes a document from a NoSQL store by key | datastores | +| `step.nosql_query` | Queries a NoSQL store with filter expressions | datastores | +| `step.artifact_upload` | Uploads a file to the artifact store | storage | +| `step.artifact_download` | Downloads a file from the artifact store | storage | +| `step.artifact_list` | Lists artifacts in the store for a given prefix | storage | +| `step.artifact_delete` | Deletes an artifact from the store | storage | +| `step.secret_rotate` | Rotates a secret in the configured secrets backend | secrets | +| `step.cloud_validate` | Validates cloud account credentials and configuration | cloud | +| `step.trace_start` 
| Starts an OpenTelemetry trace span for the current pipeline | observability | +| `step.trace_inject` | Injects trace context headers into outgoing request metadata | observability | +| `step.trace_extract` | Extracts trace context from incoming request headers | observability | +| `step.trace_annotate` | Adds key/value annotations to the current trace span | observability | +| `step.trace_link` | Links the current span to an external span by trace/span ID | observability | +| `step.gitlab_trigger_pipeline` | Triggers a GitLab CI/CD pipeline via the GitLab API | gitlab | +| `step.gitlab_pipeline_status` | Polls a GitLab pipeline until it reaches a terminal state | gitlab | +| `step.gitlab_create_mr` | Creates a GitLab merge request | gitlab | +| `step.gitlab_mr_comment` | Adds a comment to a GitLab merge request | gitlab | +| `step.gitlab_parse_webhook` | Parses and validates an inbound GitLab webhook payload | gitlab | +| `step.policy_evaluate` | Evaluates a named policy with the given input and returns allow/deny | policy | +| `step.policy_load` | Loads a policy definition into the policy engine at runtime | policy | +| `step.policy_list` | Lists all loaded policies in the policy engine | policy | +| `step.policy_test` | Runs a policy against test cases and reports pass/fail | policy | +| `step.marketplace_search` | Searches the plugin marketplace for available extensions | marketplace | +| `step.marketplace_detail` | Fetches detail information for a marketplace plugin | marketplace | +| `step.marketplace_install` | Installs a plugin from the marketplace | marketplace | +| `step.marketplace_installed` | Lists installed marketplace plugins | marketplace | +| `step.marketplace_uninstall` | Uninstalls a marketplace plugin | marketplace | +| `step.marketplace_update` | Updates a marketplace plugin to the latest version | marketplace | ### CI/CD Pipeline Steps -| Type | Description | -|------|-------------| -| `step.docker_build` | Builds a Docker image from a 
context directory and Dockerfile | -| `step.docker_push` | Pushes a Docker image to a remote registry | -| `step.docker_run` | Runs a command inside a Docker container via sandbox | -| `step.scan_sast` | Static Application Security Testing (SAST) via configurable scanner | -| `step.scan_container` | Container image vulnerability scanning via Trivy | -| `step.scan_deps` | Dependency vulnerability scanning via Grype | -| `step.artifact_push` | Stores a file in the artifact store for cross-step sharing | -| `step.artifact_pull` | Retrieves an artifact from a prior execution, URL, or S3 | +| Type | Description | Plugin | +|------|-------------|--------| +| `step.docker_build` | Builds a Docker image from a context directory and Dockerfile | cicd | +| `step.docker_push` | Pushes a Docker image to a remote registry | cicd | +| `step.docker_run` | Runs a command inside a Docker container via sandbox | cicd | +| `step.scan_sast` | Static Application Security Testing (SAST) via configurable scanner | cicd | +| `step.scan_container` | Container image vulnerability scanning via Trivy | cicd | +| `step.scan_deps` | Dependency vulnerability scanning via Grype | cicd | +| `step.artifact_push` | Stores a file in the artifact store for cross-step sharing | cicd | +| `step.artifact_pull` | Retrieves an artifact from a prior execution, URL, or S3 | cicd | +| `step.shell_exec` | Executes an arbitrary shell command | cicd | +| `step.build_binary` | Builds a Go binary using `go build` | cicd | +| `step.build_from_config` | Builds the workflow server binary from a YAML config | cicd | +| `step.build_ui` | Builds the UI assets from a frontend config | cicd | +| `step.deploy` | Deploys a built artifact to an environment | cicd | +| `step.gate` | Manual approval gate — pauses pipeline until an external signal is received | cicd | +| `step.git_clone` | Clones a Git repository | cicd | +| `step.git_commit` | Commits staged changes in a local Git repository | cicd | +| `step.git_push` | 
Pushes commits to a remote Git repository | cicd | +| `step.git_tag` | Creates and optionally pushes a Git tag | cicd | +| `step.git_checkout` | Checks out a branch, tag, or commit in a local repository | cicd | +| `step.codebuild_create_project` | Creates an AWS CodeBuild project | cicd | +| `step.codebuild_start` | Starts an AWS CodeBuild build | cicd | +| `step.codebuild_status` | Polls an AWS CodeBuild build until completion | cicd | +| `step.codebuild_logs` | Fetches logs from an AWS CodeBuild build | cicd | +| `step.codebuild_list_builds` | Lists recent AWS CodeBuild builds for a project | cicd | +| `step.codebuild_delete_project` | Deletes an AWS CodeBuild project | cicd | + +### Platform & Infrastructure Pipeline Steps +| Type | Description | Plugin | +|------|-------------|--------| +| `step.platform_template` | Renders an infrastructure template (Terraform, Helm, etc.) with pipeline context variables | platform | +| `step.k8s_plan` | Generates a Kubernetes deployment plan (dry-run) | platform | +| `step.k8s_apply` | Applies a Kubernetes manifest or deployment config | platform | +| `step.k8s_status` | Retrieves the status of a Kubernetes workload | platform | +| `step.k8s_destroy` | Tears down a Kubernetes workload | platform | +| `step.ecs_plan` | Generates an ECS task/service deployment plan | platform | +| `step.ecs_apply` | Deploys a task or service to AWS ECS | platform | +| `step.ecs_status` | Retrieves the status of an ECS service | platform | +| `step.ecs_destroy` | Removes an ECS task or service | platform | +| `step.iac_plan` | Plans IaC changes (Terraform plan, Pulumi preview, etc.) 
| platform | +| `step.iac_apply` | Applies IaC changes | platform | +| `step.iac_status` | Retrieves the current state of an IaC stack | platform | +| `step.iac_destroy` | Destroys all resources in an IaC stack | platform | +| `step.iac_drift_detect` | Detects configuration drift between desired and actual state | platform | +| `step.dns_plan` | Plans DNS record changes | platform | +| `step.dns_apply` | Applies DNS record changes | platform | +| `step.dns_status` | Retrieves the current DNS records for a domain | platform | +| `step.network_plan` | Plans networking resource changes (VPC, subnets, etc.) | platform | +| `step.network_apply` | Applies networking resource changes | platform | +| `step.network_status` | Retrieves the status of networking resources | platform | +| `step.apigw_plan` | Plans API gateway configuration changes | platform | +| `step.apigw_apply` | Applies API gateway configuration changes | platform | +| `step.apigw_status` | Retrieves API gateway deployment status | platform | +| `step.apigw_destroy` | Removes an API gateway configuration | platform | +| `step.scaling_plan` | Plans auto-scaling policy changes | platform | +| `step.scaling_apply` | Applies auto-scaling policies | platform | +| `step.scaling_status` | Retrieves current auto-scaling state | platform | +| `step.scaling_destroy` | Removes auto-scaling policies | platform | +| `step.app_deploy` | Deploys a containerized application | platform | +| `step.app_status` | Retrieves deployment status of an application | platform | +| `step.app_rollback` | Rolls back an application to a previous deployment | platform | +| `step.region_deploy` | Deploys workloads to a specific cloud region | platform | +| `step.region_promote` | Promotes a deployment from staging to production across regions | platform | +| `step.region_failover` | Triggers a regional failover | platform | +| `step.region_status` | Retrieves health and routing status for a region | platform | +| `step.region_weight` | 
Adjusts traffic weight for a region in the router | platform | +| `step.region_sync` | Synchronizes configuration across regions | platform | +| `step.argo_submit` | Submits an Argo Workflow | platform | +| `step.argo_status` | Polls an Argo Workflow until completion | platform | +| `step.argo_logs` | Retrieves logs from an Argo Workflow | platform | +| `step.argo_delete` | Deletes an Argo Workflow | platform | +| `step.argo_list` | Lists Argo Workflows for a namespace | platform | +| `step.do_deploy` | Deploys to DigitalOcean App Platform | platform | +| `step.do_status` | Retrieves DigitalOcean App Platform deployment status | platform | +| `step.do_logs` | Fetches DigitalOcean App Platform runtime logs | platform | +| `step.do_scale` | Scales a DigitalOcean App Platform component | platform | +| `step.do_destroy` | Destroys a DigitalOcean App Platform deployment | platform | ### Template Functions @@ -280,75 +414,434 @@ value: '{{ index .steps "parse-request" "path_params" "id" }}' `wfctl template validate --config workflow.yaml` lints template expressions, warns on undefined step references and forward references, and suggests using the `step` function for hyphenated names. 
### Infrastructure -| Type | Description | -|------|-------------| -| `license.validator` | License key validation against a remote server with caching and grace period | -| `platform.provider` | Cloud infrastructure provider declaration (e.g., Terraform, Pulumi) | -| `platform.resource` | Infrastructure resource managed by a platform provider | -| `platform.context` | Execution context for platform operations (org, environment, tier) | -| `platform.do_app` | DigitalOcean App Platform deployment (deploy, scale, logs, destroy) | -| `platform.do_networking` | DigitalOcean VPC and firewall management | -| `platform.do_dns` | DigitalOcean domain and DNS record management | -| `platform.do_database` | DigitalOcean Managed Database (PostgreSQL, MySQL, Redis) | -| `iac.state` | IaC state persistence (memory, filesystem, or spaces/S3-compatible backends) | +| Type | Description | Plugin | +|------|-------------|--------| +| `license.validator` | License key validation against a remote server with caching and grace period | license | +| `platform.provider` | Cloud infrastructure provider declaration (e.g., Terraform, Pulumi) | platform | +| `platform.resource` | Infrastructure resource managed by a platform provider | platform | +| `platform.context` | Execution context for platform operations (org, environment, tier) | platform | +| `platform.kubernetes` | Kubernetes cluster deployment target | platform | +| `platform.ecs` | AWS ECS cluster deployment target | platform | +| `platform.dns` | DNS provider for managing records (Route53, CloudFlare, etc.) | platform | +| `platform.networking` | VPC and networking resource management | platform | +| `platform.apigateway` | API gateway resource management (AWS API GW, etc.) 
| platform | +| `platform.autoscaling` | Auto-scaling policy and target management | platform | +| `platform.region` | Multi-region deployment configuration | platform | +| `platform.region_router` | Routes traffic across regions by weight, latency, or failover | platform | +| `platform.doks` | DigitalOcean Kubernetes Service (DOKS) deployment | platform | +| `platform.do_app` | DigitalOcean App Platform deployment (deploy, scale, logs, destroy) | platform | +| `platform.do_networking` | DigitalOcean VPC and firewall management | platform | +| `platform.do_dns` | DigitalOcean domain and DNS record management | platform | +| `platform.do_database` | DigitalOcean Managed Database (PostgreSQL, MySQL, Redis) | platform | +| `iac.state` | IaC state persistence (memory, filesystem, or spaces/S3-compatible backends) | platform | +| `app.container` | Containerized application deployment descriptor | platform | +| `argo.workflows` | Argo Workflows integration for Kubernetes-native workflow orchestration | platform | +| `aws.codebuild` | AWS CodeBuild project and build management | cicd | ### Observability -| Type | Description | -|------|-------------| -| `metrics.collector` | Prometheus metrics collection and `/metrics` endpoint | -| `health.checker` | Health endpoints (`/healthz`, `/readyz`, `/livez`) | -| `log.collector` | Centralized log collection | -| `observability.otel` | OpenTelemetry tracing integration | -| `eventlogger.modular` | Modular framework event logger | +| Type | Description | Plugin | +|------|-------------|--------| +| `metrics.collector` | Prometheus metrics collection and `/metrics` endpoint | observability | +| `health.checker` | Health endpoints (`/healthz`, `/readyz`, `/livez`) | observability | +| `log.collector` | Centralized log collection | observability | +| `observability.otel` | OpenTelemetry tracing integration | observability | +| `openapi.generator` | OpenAPI spec generation from workflow config | observability | +| 
`tracing.propagation` | OpenTelemetry trace-context propagation module | observability | + +> `eventlogger.modular` was removed; use `log.collector` or structured slog logging instead. ### Storage -| Type | Description | -|------|-------------| -| `storage.s3` | Amazon S3 storage | -| `storage.gcs` | Google Cloud Storage | -| `storage.local` | Local filesystem storage | -| `storage.sqlite` | SQLite storage | +| Type | Description | Plugin | +|------|-------------|--------| +| `storage.s3` | Amazon S3 storage | storage | +| `storage.gcs` | Google Cloud Storage | storage | +| `storage.local` | Local filesystem storage | storage | +| `storage.sqlite` | SQLite storage | storage | +| `storage.artifact` | Artifact store for build artifacts shared across pipeline steps | storage | +| `cache.redis` | Redis-backed cache module | storage | ### Actor Model -| Type | Description | -|------|-------------| -| `actor.system` | goakt v4 actor system — manages actor lifecycle and fault recovery | -| `actor.pool` | Defines a group of actors with shared behavior, routing strategy, and recovery policy | +| Type | Description | Plugin | +|------|-------------|--------| +| `actor.system` | goakt v4 actor system — manages actor lifecycle and fault recovery | actors | +| `actor.pool` | Defines a group of actors with shared behavior, routing strategy, and recovery policy | actors | ### Scheduling -| Type | Description | -|------|-------------| -| `scheduler.modular` | Cron-based job scheduling | +| Type | Description | Plugin | +|------|-------------|--------| +| `scheduler.modular` | Cron-based job scheduling | modularcompat | ### Integration -| Type | Description | -|------|-------------| -| `webhook.sender` | Outbound webhook delivery with retry and dead letter | -| `notification.slack` | Slack notifications | -| `openapi` | OpenAPI v3 spec-driven route registration with request and response validation | -| `openapi.consumer` | OpenAPI spec consumer for external service integration | -| 
`openapi.generator` | OpenAPI spec generation from workflow config | +| Type | Description | Plugin | +|------|-------------|--------| +| `webhook.sender` | Outbound webhook delivery with retry and dead letter | messaging | +| `notification.slack` | Slack notifications | messaging | +| `openapi.consumer` | OpenAPI spec consumer for external service integration | observability | +| `gitlab.webhook` | GitLab webhook receiver and validator | gitlab | +| `gitlab.client` | GitLab API client (pipelines, MRs, repos) | gitlab | +| `cloud.account` | Cloud account credential holder (AWS, GCP, Azure) | cloud | +| `security.scanner` | Security scanning provider for SAST/container/dependency scans | scanner | +| `policy.mock` | In-memory mock policy engine for testing | policy | ### Secrets -| Type | Description | -|------|-------------| -| `secrets.vault` | HashiCorp Vault integration | -| `secrets.aws` | AWS Secrets Manager integration | +| Type | Description | Plugin | +|------|-------------|--------| +| `secrets.vault` | HashiCorp Vault integration | secrets | +| `secrets.aws` | AWS Secrets Manager integration | secrets | + +### Event Sourcing & Messaging Services +| Type | Description | Plugin | +|------|-------------|--------| +| `eventstore.service` | Append-only SQLite event store for execution history | eventstore | +| `dlq.service` | Dead-letter queue service for failed message management | dlq | +| `timeline.service` | Timeline and replay service for execution visualization | timeline | +| `featureflag.service` | Feature flag evaluation engine with SSE change streaming | featureflags | +| `config.provider` | Application configuration registry with schema validation, defaults, and source layering | configprovider | ### Other -| Type | Description | -|------|-------------| -| `cache.modular` | Modular framework cache | -| `jsonschema.modular` | JSON Schema validation | -| `eventbus.modular` | Modular framework EventBus | -| `dynamic.component` | Yaegi hot-reload Go 
component | -| `data.transformer` | Data transformation | -| `workflow.registry` | Workflow registration and discovery | +| Type | Description | Plugin | +|------|-------------|--------| +| `cache.modular` | Modular framework cache | modularcompat | +| `jsonschema.modular` | JSON Schema validation | modularcompat | +| `dynamic.component` | Yaegi hot-reload Go component | ai | + +> `eventbus.modular` was removed in favor of `messaging.broker.eventbus`. +> `data.transformer` and `workflow.registry` are provided by the `api` plugin (see API & CQRS section above). ## Module Type Reference Detailed configuration reference for module types not covered in the main table above. +--- + +### `openapi` + +Parses an OpenAPI v3 specification file and automatically generates HTTP routes, validates incoming requests against the spec, and optionally serves Swagger UI. Routes are mapped to named pipelines via the `x-pipeline` extension field in the spec. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `spec_file` | string | — | Path to the OpenAPI v3 YAML or JSON spec file (resolved relative to the config file directory). | +| `router` | string | — | Name of the `http.router` module to register routes on. | +| `base_path` | string | `""` | URL path prefix to strip before matching spec paths. | +| `max_body_bytes` | int | `1048576` | Maximum request body size (bytes). | +| `validation.request` | bool | `true` | Validate incoming request bodies, query params, and headers against the spec. | +| `validation.response` | bool | `false` | Validate pipeline response bodies against the spec's response schemas. | +| `validation.response_action` | string | `"warn"` | Action when response validation fails: `"warn"` (log only) or `"error"` (return HTTP 500). | +| `swagger_ui.enabled` | bool | `false` | Serve Swagger UI (requires `spec_file`). | +| `swagger_ui.path` | string | `"/docs"` | URL path at which the Swagger UI is served. 
| + +**Route mapping via `x-pipeline`:** + +```yaml +# In your OpenAPI spec: +paths: + /users/{id}: + get: + operationId: getUser + x-pipeline: get-user-pipeline +``` + +```yaml +# In your workflow config: +modules: + - name: api-spec + type: openapi + config: + spec_file: ./api/openapi.yaml + router: main-router + validation: + request: true + response: true + response_action: warn + swagger_ui: + enabled: true + path: /docs +``` + +--- + +### `auth.m2m` + +Machine-to-machine (M2M) OAuth2 authentication module. Implements the `client_credentials` grant and `urn:ietf:params:oauth:grant-type:jwt-bearer` assertion grant. Issues signed JWTs (ES256 or HS256) and exposes a JWKS endpoint for token verification by third parties. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `algorithm` | string | `"ES256"` | JWT signing algorithm: `"ES256"` (ECDSA P-256) or `"HS256"` (symmetric HMAC). | +| `secret` | string | — | HMAC secret for HS256 (min 32 bytes). Leave empty when using ES256. | +| `privateKey` | string | — | PEM-encoded EC private key for ES256. If omitted, a key is auto-generated at startup. | +| `tokenExpiry` | duration | `"1h"` | Access token expiration duration (e.g., `"15m"`, `"1h"`). | +| `issuer` | string | `"workflow"` | Token `iss` claim. | +| `clients` | array | `[]` | Registered OAuth2 clients: `[{clientId, clientSecret, scopes, description, claims}]`. | +| `introspect` | object | — | Access-control policy for `POST /oauth/introspect`: `{allowOthers, requiredScope, requiredClaim, requiredClaimVal}`. Default: self-only. | +| `trustedKeys` | array | `[]` | Trusted external JWT issuers for JWT-bearer assertion grants: `[{issuer, publicKeyPEM, algorithm, audiences, claimMapping}]`. Supports literal `\n` in PEM values. 
| + +**HTTP endpoints provided:** + +| Endpoint | Description | +|----------|-------------| +| `POST /oauth/token` | Issue access token (client_credentials or jwt-bearer grant) | +| `GET /oauth/jwks` | JWKS endpoint for public key distribution | +| `POST /oauth/introspect` | Token introspection | + +**Example:** + +```yaml +modules: + - name: m2m-auth + type: auth.m2m + config: + algorithm: ES256 + tokenExpiry: "1h" + issuer: "my-api" + clients: + - clientId: "service-a" + clientSecret: "${SERVICE_A_SECRET}" + scopes: ["read", "write"] + description: "Internal service A" +``` + +--- + +### `api.gateway` + +Composable API gateway that combines routing, authentication, rate limiting, CORS, and reverse proxying into a single module. Each route entry specifies a path prefix, backend service, and optional per-route overrides. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `routes` | array | yes | Route definitions (see below). | +| `globalRateLimit` | object | no | Global rate limit applied to all routes: `{requestsPerMinute, burstSize}`. | +| `cors` | object | no | CORS settings: `{allowOrigins, allowMethods, allowHeaders, maxAge}`. | +| `auth` | object | no | Default auth settings: `{type: bearer\|api_key\|basic, header}`. | + +**Route fields:** + +| Key | Type | Description | +|-----|------|-------------| +| `pathPrefix` | string | URL path prefix to match (e.g., `/api/v1/orders`). | +| `backend` | string | Backend service name or URL. | +| `stripPrefix` | bool | Strip the path prefix before forwarding. Default: `false`. | +| `auth` | bool | Require authentication for this route. | +| `timeout` | duration | Per-route timeout (e.g., `"30s"`). | +| `methods` | array | Allowed HTTP methods. Empty = all methods. | +| `rateLimit` | object | Per-route rate limit override: `{requestsPerMinute, burstSize}`. 
| + +**Example:** + +```yaml +modules: + - name: gateway + type: api.gateway + config: + globalRateLimit: + requestsPerMinute: 1000 + burstSize: 50 + cors: + allowOrigins: ["*"] + allowMethods: ["GET", "POST", "PUT", "DELETE"] + routes: + - pathPrefix: /api/v1/orders + backend: orders-service + auth: true + timeout: "30s" + - pathPrefix: /api/v1/public + backend: public-service + auth: false +``` + +--- + +### `database.partitioned` + +PostgreSQL partitioned database module for multi-tenant data isolation. Manages LIST or RANGE partition creation and synchronization against a source table of tenant IDs. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `driver` | string | yes | PostgreSQL driver: `"pgx"`, `"pgx/v5"`, or `"postgres"`. | +| `dsn` | string | yes | PostgreSQL connection string. | +| `partitionKey` | string | yes | Column used for partitioning (e.g., `"tenant_id"`). | +| `tables` | array | yes | Tables to manage partitions for. | +| `partitionType` | string | `"list"` | Partition type: `"list"` (FOR VALUES IN) or `"range"` (FOR VALUES FROM/TO). | +| `partitionNameFormat` | string | `"{table}_{tenant}"` | Template for partition table names. Supports `{table}` and `{tenant}` placeholders. | +| `sourceTable` | string | — | Table containing all tenant IDs for auto-partition sync (e.g., `"tenants"`). | +| `sourceColumn` | string | — | Column in source table to query for tenant values. Defaults to `partitionKey`. | +| `autoSync` | bool | `true` | Automatically sync partitions from `sourceTable` on engine start. Defaults to `true` when `sourceTable` is set, `false` otherwise. | +| `maxOpenConns` | int | `25` | Maximum open database connections. | +| `maxIdleConns` | int | `5` | Maximum idle connections in the pool. 
| + +**Example:** + +```yaml +modules: + - name: tenant-db + type: database.partitioned + config: + driver: pgx + dsn: "${DATABASE_URL}" + partitionKey: tenant_id + tables: + - orders + - events + - sessions + partitionType: list + partitionNameFormat: "{table}_{tenant}" + sourceTable: tenants + sourceColumn: id +``` + +--- + +### `config.provider` + +Application configuration registry with schema validation, default values, and source layering. Processes `config.provider` modules before all other modules so that `{{config "key"}}` references in the rest of the YAML are expanded at load time. + +**Configuration:** + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| `schema` | array | no | Config key schema definitions: `[{key, type, default, required, description}]`. | +| `sources` | array | no | Value sources loaded in order (later sources override earlier): `[{type: env\|defaults, ...}]`. | + +**Template usage:** + +```yaml +# In any other module's config, reference config registry values: +config: + database_url: "{{config \"DATABASE_URL\"}}" + api_key: "{{config \"API_KEY\"}}" +``` + +**Example:** + +```yaml +modules: + - name: app-config + type: config.provider + config: + schema: + - key: DATABASE_URL + type: string + required: true + description: "PostgreSQL connection string" + - key: API_KEY + type: string + required: true + description: "External API key" + - key: CACHE_TTL + type: string + default: "5m" + description: "Cache entry TTL" + sources: + - type: env +``` + +--- + +### `featureflag.service` + +Feature flag evaluation engine with SQLite persistence and Server-Sent Events (SSE) change streaming. Flag values can be booleans, strings, JSON, or user-segment-based rollouts. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `provider` | string | `"sqlite"` | Storage provider: `"sqlite"` or `"memory"`. 
| +| `db_path` | string | `"data/featureflags.db"` | SQLite database path. | +| `cache_ttl` | duration | `"5m"` | How long to cache flag evaluations. | +| `sse_enabled` | bool | `false` | Enable SSE endpoint for real-time flag change streaming. | + +**Example:** + +```yaml +modules: + - name: flags + type: featureflag.service + config: + provider: sqlite + db_path: ./data/flags.db + cache_ttl: "1m" + sse_enabled: true +``` + +--- + +### `dlq.service` + +Dead-letter queue (DLQ) service for capturing, inspecting, and replaying failed messages. Backed by an in-memory or SQLite store with configurable retention. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `max_retries` | int | `3` | Maximum delivery attempts before a message is sent to the DLQ. | +| `retention_days` | int | `30` | Number of days to retain dead-lettered messages. | + +**Example:** + +```yaml +modules: + - name: dlq + type: dlq.service + config: + max_retries: 5 + retention_days: 7 +``` + +--- + +### `eventstore.service` + +Append-only event store backed by SQLite for recording execution history. Used by the timeline and replay services. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `db_path` | string | `"data/events.db"` | SQLite database path. | +| `retention_days` | int | `90` | Days to retain recorded events. | + +**Example:** + +```yaml +modules: + - name: event-store + type: eventstore.service + config: + db_path: ./data/events.db + retention_days: 30 +``` + +--- + +### `timeline.service` + +Provides an execution timeline service for step-by-step visualization of past pipeline runs. Reads events from a configured `eventstore.service` module. + +**Configuration:** + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `event_store` | string | `"admin-event-store"` | Name of the `eventstore.service` module to read from. 
| + +**Example:** + +```yaml +modules: + - name: timeline + type: timeline.service + config: + event_store: event-store +``` + +--- + ### Audit Logging (`audit/`) The `audit/` package provides a structured JSON audit logger for recording security-relevant events. It is used internally by the engine and admin platform -- not a YAML module type, but rather a Go library used by other modules. diff --git a/cmd/wfctl/mcp.go b/cmd/wfctl/mcp.go index a4135a1b..4ef78ac5 100644 --- a/cmd/wfctl/mcp.go +++ b/cmd/wfctl/mcp.go @@ -13,6 +13,7 @@ func runMCP(args []string) error { fs := flag.NewFlagSet("mcp", flag.ContinueOnError) pluginDir := fs.String("plugin-dir", "data/plugins", "Plugin data directory") registryDir := fs.String("registry-dir", "", "Path to cloned workflow-registry for plugin search") + documentationFile := fs.String("documentation-file", "", "Path to DOCUMENTATION.md (auto-detected when empty)") fs.Usage = func() { fmt.Fprintf(fs.Output(), `Usage: wfctl mcp [options] @@ -54,6 +55,9 @@ See docs/mcp.md for full setup instructions. if *registryDir != "" { opts = append(opts, workflowmcp.WithRegistryDir(*registryDir)) } + if *documentationFile != "" { + opts = append(opts, workflowmcp.WithDocumentationFile(*documentationFile)) + } srv := workflowmcp.NewServer(*pluginDir, opts...) return srv.ServeStdio() diff --git a/mcp/server.go b/mcp/server.go index 968949bf..61bdf5f4 100644 --- a/mcp/server.go +++ b/mcp/server.go @@ -57,13 +57,25 @@ func WithRegistryDir(dir string) ServerOption { } } +// WithDocumentationFile sets an explicit path to DOCUMENTATION.md so that the +// workflow://docs/full-reference MCP resource serves the actual repo documentation. +// When not set the server attempts to locate the file automatically (see +// handleDocsFullReference). If the file cannot be found the resource returns a +// brief message directing users to the public documentation URL. 
+func WithDocumentationFile(path string) ServerOption {
+	return func(s *Server) {
+		s.documentationFile = path
+	}
+}
+
 // Server wraps an MCP server instance and provides workflow-engine-specific
 // tools and resources.
 type Server struct {
-	mcpServer   *server.MCPServer
-	pluginDir   string
-	registryDir string
-	engine      EngineProvider // optional; enables execution tools when set
+	mcpServer         *server.MCPServer
+	pluginDir         string
+	registryDir       string
+	documentationFile string         // optional explicit path to DOCUMENTATION.md
+	engine            EngineProvider // optional; enables execution tools when set
 }
 
 // NewServer creates a new MCP server with all workflow engine tools and
@@ -278,6 +290,16 @@ func (s *Server) registerResources() {
 		),
 		s.handleDocsModuleReference,
 	)
+
+	s.mcpServer.AddResource(
+		mcp.NewResource(
+			"workflow://docs/full-reference",
+			"Full Workflow Engine Documentation",
+			mcp.WithResourceDescription("Complete DOCUMENTATION.md from the GoCodeAlone/workflow repository: all module types, step types, pipeline steps, template functions, configuration format, workflow types, trigger types, CI/CD steps, platform steps, and detailed per-module reference."),
+			mcp.WithMIMEType("text/markdown"),
+		),
+		s.handleDocsFullReference,
+	)
 }
 
 // --- Tool Handlers ---
@@ -635,6 +657,67 @@ func (s *Server) handleDocsModuleReference(_ context.Context, _ mcp.ReadResource
 	}, nil
 }
 
+// handleDocsFullReference serves the complete DOCUMENTATION.md from the
+// GoCodeAlone/workflow repository. It resolves the file in this order:
+//  1. The explicit path set via WithDocumentationFile (if provided).
+//  2. A path derived from the plugin directory (same parent-of-data layout used
+//     by handleGetConfigExamples): <plugin-dir>/../../DOCUMENTATION.md.
+//  3. DOCUMENTATION.md in the current working directory.
+//
+// If none of the candidates can be read, a fallback message with the public
+// documentation URL is returned so the resource is always usable.
+func (s *Server) handleDocsFullReference(_ context.Context, _ mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) {
+	content := s.resolveDocumentationContent()
+	return []mcp.ResourceContents{
+		mcp.TextResourceContents{
+			URI:      "workflow://docs/full-reference",
+			MIMEType: "text/markdown",
+			Text:     content,
+		},
+	}, nil
+}
+
+// resolveDocumentationContent attempts to read DOCUMENTATION.md from several
+// well-known locations and returns its content, or a fallback string on failure.
+func (s *Server) resolveDocumentationContent() string {
+	candidates := s.documentationFileCandidates()
+	for _, p := range candidates {
+		if data, err := os.ReadFile(p); err == nil { //nolint:gosec // G304: path derived from trusted server config
+			return string(data)
+		}
+	}
+	return "# GoCodeAlone/workflow Documentation\n\n" +
+		"The full documentation (DOCUMENTATION.md) could not be found on the local filesystem.\n\n" +
+		"Please refer to the repository documentation at:\n" +
+		"https://github.com/GoCodeAlone/workflow/blob/main/DOCUMENTATION.md\n"
+}
+
+// documentationFileCandidates returns ordered candidate paths for DOCUMENTATION.md.
+func (s *Server) documentationFileCandidates() []string {
+	var candidates []string
+
+	// 1. Explicit override via WithDocumentationFile.
+	if s.documentationFile != "" {
+		candidates = append(candidates, s.documentationFile)
+	}
+
+	// 2. Derive from pluginDir: <pluginDir> = .../data/plugins → root = pluginDir/../..
+	if s.pluginDir != "" {
+		pluginBase := filepath.Base(s.pluginDir)
+		dataDir := filepath.Dir(s.pluginDir)
+		dataBase := filepath.Base(dataDir)
+		if pluginBase == "plugins" && dataBase == "data" {
+			root := filepath.Dir(dataDir)
+			candidates = append(candidates, filepath.Join(root, "DOCUMENTATION.md"))
+		}
+	}
+
+	// 3. Current working directory.
+ candidates = append(candidates, "DOCUMENTATION.md") + + return candidates +} + // --- Helpers --- func marshalToolResult(v any) (*mcp.CallToolResult, error) { diff --git a/mcp/step_coverage_test.go b/mcp/step_coverage_test.go new file mode 100644 index 00000000..d8696013 --- /dev/null +++ b/mcp/step_coverage_test.go @@ -0,0 +1,247 @@ +package mcp + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" + + "github.com/GoCodeAlone/workflow/capability" + "github.com/GoCodeAlone/workflow/plugin" + pluginall "github.com/GoCodeAlone/workflow/plugins/all" + "github.com/GoCodeAlone/workflow/schema" + "github.com/mark3labs/mcp-go/mcp" +) + +// registerBuiltinPluginTypesForTest loads all built-in plugins into the global +// schema registries (schema.KnownModuleTypes / schema.GetStepSchemaRegistry) +// so that MCP tools that rely on these registries reflect the full type set. +// This mirrors what happens at runtime when the workflow engine calls LoadPlugin +// for each built-in plugin. +func registerBuiltinPluginTypesForTest(t *testing.T) { + t.Helper() + capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + // Register module and step types into the global schema registry so + // that schema.KnownModuleTypes() and handleListStepTypes see them. + for typeName := range loader.ModuleFactories() { + schema.RegisterModuleType(typeName) + } + for typeName := range loader.StepFactories() { + schema.RegisterModuleType(typeName) + } + // Register rich step schemas (descriptions, config fields, outputs). 
+ for _, ss := range loader.StepSchemaRegistry().All() { + schema.GetStepSchemaRegistry().Register(ss) + } + } +} + +// TestListStepTypes_AllBuiltinsPresent validates that every step type registered +// by the built-in plugins (plugins/all) appears in the MCP list_step_types tool +// response. This is the MCP equivalent of TestDocumentationCoverage and ensures +// that wfctl's MCP server accurately reflects all available step types. +func TestListStepTypes_AllBuiltinsPresent(t *testing.T) { + registerBuiltinPluginTypesForTest(t) + + srv := NewServer("") + result, err := srv.handleListStepTypes(context.Background(), mcp.CallToolRequest{}) + if err != nil { + t.Fatalf("handleListStepTypes error: %v", err) + } + + text := extractText(t, result) + var data map[string]any + if err := json.Unmarshal([]byte(text), &data); err != nil { + t.Fatalf("failed to parse result JSON: %v", err) + } + + steps, ok := data["step_types"].([]any) + if !ok { + t.Fatal("step_types not found in result") + } + listed := make(map[string]bool, len(steps)) + for _, s := range steps { + if entry, ok := s.(map[string]any); ok { + if typeName, ok := entry["type"].(string); ok { + listed[typeName] = true + } + } + } + + // Collect all step types from the built-in plugins. 
+ capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + } + + var missing []string + for typeName := range loader.StepFactories() { + if !listed[typeName] { + missing = append(missing, typeName) + } + } + + if len(missing) > 0 { + sort.Strings(missing) + t.Errorf("step types registered by built-in plugins but missing from list_step_types (%d missing):\n %s\n\n"+ + "Add these step types to schema/schema.go coreModuleTypes slice "+ + "or register them via schema.RegisterModuleType so they appear in KnownModuleTypes.", + len(missing), strings.Join(missing, "\n ")) + } +} + +// TestListModuleTypes_AllBuiltinsPresent validates that every module type registered +// by the built-in plugins (plugins/all) appears in the MCP list_module_types tool +// response. +func TestListModuleTypes_AllBuiltinsPresent(t *testing.T) { + registerBuiltinPluginTypesForTest(t) + + srv := NewServer("") + result, err := srv.handleListModuleTypes(context.Background(), mcp.CallToolRequest{}) + if err != nil { + t.Fatalf("handleListModuleTypes error: %v", err) + } + + text := extractText(t, result) + var data map[string]any + if err := json.Unmarshal([]byte(text), &data); err != nil { + t.Fatalf("failed to parse result JSON: %v", err) + } + + rawTypes, ok := data["module_types"].([]any) + if !ok { + t.Fatal("module_types not found in result") + } + listed := make(map[string]bool, len(rawTypes)) + for _, mt := range rawTypes { + if s, ok := mt.(string); ok { + listed[s] = true + } + } + + // Collect all module types from the built-in plugins. 
+ capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range pluginall.DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) failed: %v", p.Name(), err) + } + } + + var missing []string + for typeName := range loader.ModuleFactories() { + if !listed[typeName] { + missing = append(missing, typeName) + } + } + + if len(missing) > 0 { + sort.Strings(missing) + t.Errorf("module types registered by built-in plugins but missing from list_module_types (%d missing):\n %s\n\n"+ + "Add these module types to schema/schema.go coreModuleTypes slice "+ + "or register them via schema.RegisterModuleType so they appear in KnownModuleTypes.", + len(missing), strings.Join(missing, "\n ")) + } +} + +// TestDocsFullReference_Fallback verifies that the full-reference resource +// returns a usable fallback when DOCUMENTATION.md cannot be found. +func TestDocsFullReference_Fallback(t *testing.T) { + // Use a server with a non-existent plugin dir so no file is found. + srv := NewServer("/nonexistent/data/plugins") + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(contents) != 1 { + t.Fatalf("expected 1 resource content, got %d", len(contents)) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + if text.URI != "workflow://docs/full-reference" { + t.Errorf("unexpected URI: %q", text.URI) + } + if text.MIMEType != "text/markdown" { + t.Errorf("unexpected MIME type: %q", text.MIMEType) + } + if !strings.Contains(text.Text, "GoCodeAlone/workflow") { + t.Error("fallback text should mention 'GoCodeAlone/workflow'") + } +} + +// TestDocsFullReference_WithFile verifies that the full-reference resource +// serves the provided file content when WithDocumentationFile is used. 
+func TestDocsFullReference_WithFile(t *testing.T) { + // Write a temporary DOCUMENTATION.md-like file. + dir := t.TempDir() + docPath := filepath.Join(dir, "DOCUMENTATION.md") + content := "# Workflow Engine Documentation\n\nTest content.\n" + if err := os.WriteFile(docPath, []byte(content), 0600); err != nil { + t.Fatalf("failed to write temp file: %v", err) + } + + srv := NewServer("", WithDocumentationFile(docPath)) + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(contents) != 1 { + t.Fatalf("expected 1 resource content, got %d", len(contents)) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + if text.Text != content { + t.Errorf("expected file content %q, got %q", content, text.Text) + } +} + +// TestDocsFullReference_RepoFile verifies that the full-reference resource +// serves the actual DOCUMENTATION.md when it exists next to the test. +func TestDocsFullReference_RepoFile(t *testing.T) { + // Locate the repo root via the test file's path. + _, testFilePath, _, ok := runtime.Caller(0) + if !ok { + t.Skip("runtime.Caller failed") + } + repoRoot := filepath.Join(filepath.Dir(testFilePath), "..") + docPath := filepath.Join(repoRoot, "DOCUMENTATION.md") + if _, err := os.Stat(docPath); err != nil { + t.Skipf("DOCUMENTATION.md not found at %q: %v", docPath, err) + } + + srv := NewServer("", WithDocumentationFile(docPath)) + contents, err := srv.handleDocsFullReference(context.Background(), mcp.ReadResourceRequest{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + text, ok := contents[0].(mcp.TextResourceContents) + if !ok { + t.Fatal("expected TextResourceContents") + } + + // Spot-check a few key strings that should be in DOCUMENTATION.md. 
+ for _, want := range []string{"openapi", "auth.m2m", "database.partitioned", "config.provider"} { + if !strings.Contains(text.Text, want) { + t.Errorf("DOCUMENTATION.md should contain %q", want) + } + } +} diff --git a/plugins/all/all.go b/plugins/all/all.go index d5453a32..9056a6d9 100644 --- a/plugins/all/all.go +++ b/plugins/all/all.go @@ -42,6 +42,7 @@ import ( pluginmessaging "github.com/GoCodeAlone/workflow/plugins/messaging" pluginmodcompat "github.com/GoCodeAlone/workflow/plugins/modularcompat" pluginobs "github.com/GoCodeAlone/workflow/plugins/observability" + pluginopenapi "github.com/GoCodeAlone/workflow/plugins/openapi" pluginpipeline "github.com/GoCodeAlone/workflow/plugins/pipelinesteps" pluginplatform "github.com/GoCodeAlone/workflow/plugins/platform" pluginpolicy "github.com/GoCodeAlone/workflow/plugins/policy" @@ -67,6 +68,7 @@ func DefaultPlugins() []plugin.EnginePlugin { pluginlicense.New(), pluginconfigprovider.New(), pluginhttp.New(), + pluginopenapi.New(), pluginobs.New(), pluginmessaging.New(), pluginsm.New(), diff --git a/plugins/all/doc_coverage_test.go b/plugins/all/doc_coverage_test.go new file mode 100644 index 00000000..898e7485 --- /dev/null +++ b/plugins/all/doc_coverage_test.go @@ -0,0 +1,75 @@ +package all + +import ( + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" + + "github.com/GoCodeAlone/workflow/capability" + "github.com/GoCodeAlone/workflow/plugin" + "github.com/GoCodeAlone/workflow/schema" +) + +// TestDocumentationCoverage verifies that every registered module type and +// step type appears in DOCUMENTATION.md at least once (as a backtick-quoted +// string, e.g. `my.module`). This test is intended to catch drift between +// the plugin registrations and the public-facing documentation. +// +// If a new module or step type is added but the documentation is not updated, +// this test will fail with a list of the missing entries so they can be added +// to DOCUMENTATION.md. 
+func TestDocumentationCoverage(t *testing.T) { + // Locate DOCUMENTATION.md relative to this test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + t.Fatal("runtime.Caller failed") + } + docPath := filepath.Join(filepath.Dir(filename), "..", "..", "DOCUMENTATION.md") + + raw, err := os.ReadFile(docPath) //nolint:gosec // path constructed from known repo structure + if err != nil { + t.Fatalf("read DOCUMENTATION.md: %v", err) + } + docContent := string(raw) + + // Load all built-in plugins into a throwaway loader. + capReg := capability.NewRegistry() + schemaReg := schema.NewModuleSchemaRegistry() + loader := plugin.NewPluginLoader(capReg, schemaReg) + for _, p := range DefaultPlugins() { + if err := loader.LoadPlugin(p); err != nil { + t.Fatalf("LoadPlugin(%q) error: %v", p.Name(), err) + } + } + + // Collect module types missing from docs. + var missingModules []string + for typeName := range loader.ModuleFactories() { + if !strings.Contains(docContent, "`"+typeName+"`") { + missingModules = append(missingModules, typeName) + } + } + + // Collect step types missing from docs. + var missingSteps []string + for typeName := range loader.StepFactories() { + if !strings.Contains(docContent, "`"+typeName+"`") { + missingSteps = append(missingSteps, typeName) + } + } + + if len(missingModules) > 0 { + sort.Strings(missingModules) + t.Errorf("module types registered but not documented in DOCUMENTATION.md (%d missing):\n %s\n\nAdd a row for each type to the appropriate section of DOCUMENTATION.md.", + len(missingModules), strings.Join(missingModules, "\n ")) + } + + if len(missingSteps) > 0 { + sort.Strings(missingSteps) + t.Errorf("step types registered but not documented in DOCUMENTATION.md (%d missing):\n %s\n\nAdd a row for each type to the appropriate section of DOCUMENTATION.md.", + len(missingSteps), strings.Join(missingSteps, "\n ")) + } +}