From 6737317ca1dce2c3a8edef775a483fd5ae1f1e7d Mon Sep 17 00:00:00 2001
From: Nicolas Dreno
Date: Tue, 5 May 2026 10:45:57 +0200
Subject: [PATCH] =?UTF-8?q?release:=200.7.0=20=E2=80=94=20AI=20gateway=20e?=
 =?UTF-8?q?xtensions=20(ADR-0024=20+=20ADR-0030)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Bump workspace version 0.6.3 → 0.7.0 (single bump; all crates inherit via
  version.workspace = true; internal path-deps updated to match)
- Cut CHANGELOG [Unreleased] → [0.7.0] - 2026-05-05; consolidate the three
  scattered ### Added blocks into thematic subsections (caller-owned model,
  stateless Responses, /v1/models, policy middlewares, CEL extensions, dev
  workflow); fill in stale comparison links from v0.4.1 forward
- Bump pinned plugin URL example in docs to v0.7.0
---
 CHANGELOG.md                     | 61 ++++++++++++++++++++++++++++++++++++++------------
 Cargo.lock                       | 18 +++++-----
 Cargo.toml                       | 20 +++++------
 docs/guide/spec-configuration.md |  2 +-
 4 files changed, 58 insertions(+), 43 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1a11a3..61ae4da 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,41 +7,47 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.7.0] - 2026-05-05
+
+Headline: AI gateway extensions land — caller-owned model, glob-based dynamic routing, stateless Responses API, aggregated model catalog, and four new policy middlewares (ADR-0024 + ADR-0030).
+
 ### Added
-- **plugin**: `ai-proxy` `GET /v1/models` — model catalog aggregator (ADR-0030 §4). Walks every unique provider declared in `routes`, `targets`, and the flat config, queries each upstream's `/v1/models` (or `/api/tags` for Ollama, then translates the shape), and returns an OpenAI-compatible `{ object: "list", data: [...] }` payload. On per-provider failure (5xx, timeout, connection error) returns `200 OK` with `partial: true` + `warnings: [{provider, status, detail}]` so a single flaky upstream doesn't take the discovery endpoint down. Each failure increments `barbacane_plugin_ai_proxy_models_provider_failures_total{provider}`. Caching, single-flight, and route-based filtering of advertised models are tracked as a follow-up; v1 hits upstreams on every call (the endpoint is not on the data plane critical path).
-- **spec-fragment**: `schemas/ai-gateway.yaml` — operator-facing OpenAPI fragment declaring all three AI gateway operations (`/v1/chat/completions`, `/v1/responses`, `/v1/models`) bound to `ai-proxy`. Drop into your project's `specs/` folder; multi-file spec discovery picks it up automatically. Provider keys + base URL are read from `OPENAI_API_KEY` / `ANTHROPIC_API_KEY` / `OLLAMA_BASE_URL` via `env://` references. Default routes are `claude-*` → Anthropic, `gpt-*` / `o[1-4]*` → OpenAI, `*` → Ollama; copy the fragment into your own folder to customise. Tested via `test_shipped_ai_gateway_spec_fragment_compiles` so the artifact build is regression-protected.
-- **cli**: `barbacane dev` — local development server with file watching and automatic hot-reload. Compiles specs, starts the gateway, watches for changes to specs, manifest, and plugin WASM files, and reloads the gateway on every save. No more manual compile-serve cycle during development.
-- **compiler**: `specs` field in `barbacane.yaml` — point to a folder (e.g., `specs: ./specs/`) and all `*.yaml`/`*.json` files are discovered automatically. Used by `barbacane dev` for zero-config operation and as a fallback for `barbacane compile` when `--spec` is omitted.
-- **cli**: `barbacane compile` now discovers specs from the manifest's `specs` folder when `--spec` is not provided — `barbacane compile -m barbacane.yaml -o api.bca` works with zero spec args.
-- **cli**: `barbacane init` now scaffolds a `specs/` directory and places the generated spec in `specs/api.yaml` with `specs: ./specs/` in the manifest.
-- **plugin**: `cel` now binds `request.body_json` in addition to the existing `request.body` string when the inbound `content-type` is `application/json` or any `application/*+json` vendor type. Enables consumer-policy expressions like `request.body_json.model.startsWith('gpt-4o')` without writing string-matching CEL. Empty map on non-JSON content-types and on parse failures (warning logged on failure; never short-circuits the request — a CEL plugin that 500s on every garbled body would let an attacker take down every downstream policy with one bad byte). Prereq for the AI consumer-policy examples in ADR-0030.
-- **plugin**: `cel` `on_match.deny: { status, code, message? }` — reject the request with a configurable problem+json status and code when the expression matches. The `code` is exposed both as the URN suffix on the response `type` field and as a `code` field on the body, matching the `error.type = "model_not_permitted"` convention used by `ai-proxy`. Status defaults to 403 and is clamped into the 4xx range — denying as 5xx would mask a policy decision as a server fault. When both `set_context` and `deny` are configured for the same `on_match`, `deny` wins on a match and context is not written. `OnMatch` and `DenyAction` now `deny_unknown_fields`, so operator typos surface at config-load time instead of being silently dropped.
+
+#### AI Gateway — Caller-owned model + dynamic routing (ADR-0030 §0, §3)
+- **plugin**: `ai-proxy` `routes` table — glob-based dynamic model routing (ADR-0030 §3). Each entry has a `pattern` (`*`, `?`, `[...]` glob, case-sensitive), a `provider`, and optional `api_key` / `base_url` / `allow` / `deny`. First match wins (config sketch below). Resolution precedence: `ai.target` context key (set by `cel`) > routes glob match > `default_target` > flat `provider`. When `routes` is configured but no entry matches and there's no fallthrough, the dispatcher returns `400 problem+json` with `code: "no_route"` rather than silently picking a default.
+- **plugin**: `ai-proxy` `allow` / `deny` glob lists on `targets.<name>` and on `routes[]` entries — catalog policy attached to the target. Evaluated against the client's `model` after resolution; `deny` is evaluated after `allow`. A denied model returns `403 problem+json` with `code: "model_not_permitted"` and does NOT fall through to fallback or to another route — that would silently escalate to a different provider. Critically, this applies on every resolution path: a `cel` misconfig that sets `ai.target` to a target whose `deny` covers the requested model still gets 403, because catalog policy is a property of the target, not the resolution path.
+- **plugin**: `ai-proxy` emits a `resolution_total{resolution="context|routes|default|flat"}` counter and a debug log `ai-proxy: resolved provider=X via=Y` so operators can debug "why did my request go to provider X?".
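+
+For illustration, a `routes` table of the shape described above. The key nesting inside the `ai-proxy` config block and the concrete values are illustrative, not copied from the shipped fragment:
+
+```yaml
+routes:
+  - pattern: "claude-*"              # glob: *, ?, [...] (case-sensitive)
+    provider: anthropic
+    api_key: env://ANTHROPIC_API_KEY
+  - pattern: "gpt-*"
+    provider: openai
+    api_key: env://OPENAI_API_KEY
+    deny: ["gpt-4o-audio-*"]         # catalog policy: a post-resolution match is a hard 403
+  - pattern: "*"                     # first match wins, so the catch-all goes last
+    provider: ollama
+    base_url: env://OLLAMA_BASE_URL
+```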
+
+#### AI Gateway — Stateless Responses API (ADR-0030 §2)
+- **plugin**: `ai-proxy` `POST /v1/responses` — OpenAI Responses API support, stateless only. For the OpenAI provider, the dispatcher passes through to the upstream `/v1/responses` and **rewrites the response `id` to a synthetic `resp_<...>`** so the gateway's stateless contract holds uniformly across providers — without this, OpenAI's real id leaks to the client and they could send it back as `previous_response_id` (which we 400 on). For Anthropic, the request is translated to Messages API: `input_text`/`input_image` → `text`/`image` content blocks, `function_call` + `function_call_output` → `tool_use` + `tool_result`, `reasoning` items are dropped (Anthropic doesn't accept client-supplied reasoning). The response is translated back to Responses shape with a synthetic time-ordered `id`. For Ollama, returns 400 `responses_not_supported_for_provider` (Ollama's OpenAI-compat surface is Chat Completions only). Streaming SSE on the OpenAI passthrough does not rewrite the in-event id — true SSE handling is deferred for both protocols (ADR-0030 §2 "Streaming").
+- **plugin**: `ai-proxy` `previous_response_id` returns 400 `previous_response_id_not_supported`. The stateful Responses API (`previous_response_id` + `GET /v1/responses/{id}` retrieval) requires session-scoped storage that ADR-0030 §2 explicitly defers; the rejection is the forward-compatibility hook (body sketch below).
+- **plugin**: `ai-proxy` `store` flag is permissive — `true`, `false`, and absent all flow through unchanged. When `store ≠ false` (most clients send `true` as an unexamined default), the dispatcher emits a `Warning: 299 - "store ignored; gateway is stateless, see ADR-0030"` header and increments `barbacane_plugin_ai_proxy_responses_store_downgrades_total`. Operators can quantify stateful-API usage and decide whether to prioritize the future session-storage capability.
+- **plugin**: `ai-proxy` `reasoning` items dropped on the Responses → Anthropic translation path emit `Warning: 299 - "reasoning items dropped..."` and increment `barbacane_plugin_ai_proxy_responses_reasoning_dropped_total`. Silent reasoning drops can degrade output quality on multi-turn agent flows in ways the client cannot detect.
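+
+The rejection from the client's side, sketched as YAML. The `type`/`code` pairing follows the problem+json convention this release uses for `cel` and `ai-proxy`; `status` and `detail` are standard RFC 9457 members, and the `detail` wording here is illustrative:
+
+```yaml
+# POST /v1/responses carrying previous_response_id → 400 problem+json
+type: "urn:barbacane:error:previous_response_id_not_supported"
+status: 400
+code: "previous_response_id_not_supported"
+detail: "stateful Responses API is deferred (ADR-0030 §2); resend the full input instead"   # illustrative wording
+```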
+
+#### AI Gateway — Aggregated model catalog (ADR-0030 §4)
+- **plugin**: `ai-proxy` `GET /v1/models` — model catalog aggregator. Walks every unique provider declared in `routes`, `targets`, and the flat config, queries each upstream's `/v1/models` (or `/api/tags` for Ollama, then translates the shape), and returns an OpenAI-compatible `{ object: "list", data: [...] }` payload. On per-provider failure (5xx, timeout, connection error) returns `200 OK` with `partial: true` + `warnings: [{provider, status, detail}]` so a single flaky upstream doesn't take the discovery endpoint down. Each failure increments `barbacane_plugin_ai_proxy_models_provider_failures_total{provider}`. Caching, single-flight, and route-based filtering of advertised models are tracked as a follow-up; v1 hits upstreams on every call (the endpoint is not on the data plane critical path).
+- **spec-fragment**: `schemas/ai-gateway.yaml` — operator-facing OpenAPI fragment declaring all three AI gateway operations (`/v1/chat/completions`, `/v1/responses`, `/v1/models`) bound to `ai-proxy`. Drop into your project's `specs/` folder; multi-file spec discovery picks it up automatically. Provider keys + base URL are read from `OPENAI_API_KEY` / `ANTHROPIC_API_KEY` / `OLLAMA_BASE_URL` via `env://` references. Default routes are `claude-*` → Anthropic, `gpt-*` / `o[1-4]*` → OpenAI, `*` → Ollama; copy the fragment into your own folder to customise. Tested via `test_shipped_ai_gateway_spec_fragment_compiles` so the artifact build is regression-protected.
 
-#### AI Gateway middlewares (ADR-0024)
+#### AI Gateway — Policy middlewares (ADR-0024)
 - **`ai-prompt-guard` middleware plugin**: validates LLM chat-completion requests before dispatch — named profiles carry `max_messages`, `max_message_length`, regex `blocked_patterns`, and managed `system_template` with `{var}` substitution. Short-circuits with 400 + RFC 9457 problem+json on violation.
 - **`ai-token-limit` middleware plugin**: token-based sliding-window rate limiting for LLM endpoints. Named profiles carry `quota` + `window` (seconds); `partition_key` / `policy_name` / `count` stay top-level. Advisory semantics: streaming responses can't be interrupted mid-flight, so overshoots are absorbed and the next request 429s. Emits standard `ratelimit-*` response headers.
 - **`ai-cost-tracker` middleware plugin**: per-request LLM cost in USD from a configurable `provider/model` price table (USD per 1,000 tokens). Emits the Prometheus counter `barbacane_plugin_ai_cost_tracker_cost_dollars` with `provider` and `model` labels for Grafana spend dashboards. No profile map — prices are operator facts, not policy.
 - **`ai-response-guard` middleware plugin**: inspects LLM responses (OpenAI chat-completion format) in `on_response`. Named profiles carry `redact` rules (regex → replacement, scoped to `choices[].message.content` and `delta.content`) and `blocked_patterns` (match replaces the response with 502). Streamed responses cannot be redacted after the fact; the plugin emits `redactions_skipped_streaming_total` instead.
 - **Named-profile + CEL composition pattern**: all four AI middlewares read a `context_key` (default `ai.policy`, overridable) to select the active profile. A `cel` middleware upstream writes `ai.policy` via `on_match.set_context`; one CEL decision fans out to prompt strictness, token budget, redaction strictness, and the `ai-proxy` dispatcher's named targets (via `ai.target`). Sketched below.
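+
+The composition pattern end to end. The middleware-chain syntax, profile names, and the CEL expression are illustrative; the load-bearing pieces (`on_match.set_context`, the `ai.policy` default for `context_key`, named profiles) are as described above:
+
+```yaml
+# One upstream cel decision...
+- plugin: cel                          # chain syntax illustrative
+  config:
+    expression: "request.headers['x-tier'] == 'internal'"
+    on_match:
+      set_context:
+        ai.policy: relaxed             # selects the profile in all four AI middlewares
+        ai.target: local-ollama        # also steers the ai-proxy dispatcher's named targets
+# ...fans out to every middleware that reads context_key (default ai.policy):
+- plugin: ai-prompt-guard
+  config:
+    default_profile: strict
+    profiles:
+      strict:  { max_messages: 20, max_message_length: 4000 }
+      relaxed: { max_messages: 200, max_message_length: 32000 }
+```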
-### Added
-- **plugin**: `ai-proxy` `POST /v1/responses` — OpenAI Responses API support, stateless only (ADR-0030 §2). For OpenAI provider, the dispatcher passes through to the upstream `/v1/responses` and **rewrites the response `id` to a synthetic `resp_<...>`** so the gateway's stateless contract holds uniformly across providers — without this, OpenAI's real id leaks to the client and they could send it back as `previous_response_id` (which we 400 on). For Anthropic, the request is translated to Messages API: `input_text`/`input_image` → `text`/`image` content blocks, `function_call` + `function_call_output` → `tool_use` + `tool_result`, `reasoning` items are dropped (Anthropic doesn't accept client-supplied reasoning). The response is translated back to Responses shape with a synthetic time-ordered `id`. For Ollama, returns 400 `responses_not_supported_for_provider` (Ollama's OpenAI-compat surface is Chat Completions only). Streaming SSE on the OpenAI passthrough does not rewrite the in-event id — true SSE handling is deferred for both protocols (ADR-0030 §2 "Streaming").
-- **plugin**: `ai-proxy` `previous_response_id` returns 400 `previous_response_id_not_supported`. The stateful Responses API (`previous_response_id` + `GET /v1/responses/{id}` retrieval) requires session-scoped storage that ADR-0030 §2 explicitly defers; the rejection is the forward-compatibility hook.
-- **plugin**: `ai-proxy` `store` flag is permissive — `true`, `false`, and absent all flow through unchanged. When `store ≠ false` (most clients send `true` as an unexamined default), the dispatcher emits a `Warning: 299 - "store ignored; gateway is stateless, see ADR-0030"` header and increments `barbacane_plugin_ai_proxy_responses_store_downgrades_total`. Operators can quantify stateful-API usage and decide whether to prioritize the future session-storage capability.
-- **plugin**: `ai-proxy` `reasoning` items dropped on the Responses → Anthropic translation path emit `Warning: 299 - "reasoning items dropped..."` and increment `barbacane_plugin_ai_proxy_responses_reasoning_dropped_total`. Silent reasoning drops can degrade output quality on multi-turn agent flows in ways the client cannot detect.
-
-### Fixed
-- **plugin**: `ai-proxy` no longer returns `404 Not Found` when the operation is bound to a path other than `/v1/chat/completions`. The path-based dispatch added in PR-1 was too strict — operators are free to bind `ai-proxy` to any operation path, and the dispatcher routes Chat Completions requests through unchanged. PR-4 will reintroduce path-based dispatch narrowly when `/v1/responses` actually has a second protocol to differentiate.
+
+#### CEL extensions for AI policy
+- **plugin**: `cel` now binds `request.body_json` in addition to the existing `request.body` string when the inbound `content-type` is `application/json` or any `application/*+json` vendor type. Enables consumer-policy expressions like `request.body_json.model.startsWith('gpt-4o')` without writing string-matching CEL. `request.body_json` is an empty map on non-JSON content-types and on parse failures (warning logged on failure; never short-circuits the request — a CEL plugin that 500s on every garbled body would let an attacker take down every downstream policy with one bad byte). Prereq for the AI consumer-policy examples in ADR-0030.
+- **plugin**: `cel` `on_match.deny: { status, code, message? }` — reject the request with a configurable problem+json status and code when the expression matches (example below). The `code` is exposed both as the URN suffix on the response `type` field and as a `code` field on the body, matching the `error.type = "model_not_permitted"` convention used by `ai-proxy`. Status defaults to 403 and is clamped into the 4xx range — denying as 5xx would mask a policy decision as a server fault. When both `set_context` and `deny` are configured for the same `on_match`, `deny` wins on a match and context is not written. `OnMatch` and `DenyAction` now `deny_unknown_fields`, so operator typos surface at config-load time instead of being silently dropped.
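+
+Combined, the two extensions make a body-aware model gate a few lines of `cel` config. The expression and message are illustrative; the `deny` fields are exactly the `{ status, code, message? }` shape above:
+
+```yaml
+expression: "request.body_json.model.startsWith('gpt-4o')"   # body_json is bound only for JSON content-types
+on_match:
+  deny:
+    status: 403                  # the default; clamped into the 4xx range
+    code: model_not_permitted    # surfaces as the URN suffix on `type` and as the body `code`
+    message: "gpt-4o models are not available through this gateway"   # optional
+```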
-### Added
-- **plugin**: `ai-proxy` `routes` table — glob-based dynamic model routing (ADR-0030 §3). Each entry has a `pattern` (`*`, `?`, `[...]` glob, case-sensitive), a `provider`, and optional `api_key` / `base_url` / `allow` / `deny`. First match wins. Resolution precedence: `ai.target` context key (set by `cel`) > routes glob match > `default_target` > flat `provider`. When `routes` is configured but no entry matches and there's no fallthrough, the dispatcher returns `400 problem+json` with `code: "no_route"` rather than silently picking a default.
-- **plugin**: `ai-proxy` `allow` / `deny` glob lists on `targets.<name>` and on `routes[]` entries — catalog policy attached to the target. Evaluated against the client's `model` after resolution; `deny` is evaluated after `allow`. A denied model returns `403 problem+json` with `code: "model_not_permitted"` and does NOT fall through to fallback or to another route — that would silently escalate to a different provider. Critically, this applies on every resolution path: a `cel` misconfig that sets `ai.target` to a target whose `deny` covers the requested model still gets 403, because catalog policy is a property of the target, not the resolution path.
-- **plugin**: `ai-proxy` emits `resolution_total{resolution="context|routes|default|flat"}` counter and a debug log `ai-proxy: resolved provider=X via=Y` so operators can debug "why did my request go to provider X?".
+
+#### Local development workflow
+- **cli**: `barbacane dev` — local development server with file watching and automatic hot-reload. Compiles specs, starts the gateway, watches for changes to specs, manifest, and plugin WASM files, and reloads the gateway on every save. No more manual compile-serve cycle during development.
+- **compiler**: `specs` field in `barbacane.yaml` — point to a folder (e.g., `specs: ./specs/`) and all `*.yaml`/`*.json` files are discovered automatically (manifest sketch below). Used by `barbacane dev` for zero-config operation and as a fallback for `barbacane compile` when `--spec` is omitted.
+- **cli**: `barbacane compile` now discovers specs from the manifest's `specs` folder when `--spec` is not provided — `barbacane compile -m barbacane.yaml -o api.bca` works with zero spec args.
+- **cli**: `barbacane init` now scaffolds a `specs/` directory and places the generated spec in `specs/api.yaml` with `specs: ./specs/` in the manifest.
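+
+Together these collapse the local loop to a folder and one command. A minimal manifest sketch (plugin entries elided):
+
+```yaml
+# barbacane.yaml
+specs: ./specs/      # every *.yaml / *.json in this folder is auto-discovered
+plugins:
+  # ...
+```
+
+`barbacane dev` then compiles, serves, and hot-reloads on every save to specs, manifest, or plugin WASM.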
 
 ### Changed
-- **plugin**: `cel` `on_match` now rejects unknown fields at config-load time (`deny_unknown_fields`). Previously a typo like `on_match: { set_contxt: {...} }` was silently dropped — and crucially, the `on_match.deny` block proposed in early ADR-0030 drafts was silently dropped against any pre-existing `cel` deployment, which is the bug that motivated this PR. Operators with typos in `on_match` will now see a clear deserialization error instead of a silently broken policy.
 - **plugin (BREAKING)**: `ai-proxy` no longer accepts a `model` field on flat config, on `targets.<name>`, or on `fallback[]` entries (ADR-0030 §0 — caller-owned model). The model identifier is now always taken from the client's `model` field on the request body and passed to the upstream provider verbatim; the gateway never picks a default. Migration: delete `model:` from every `ai-proxy` config block (sketch at the end of this section). Operators upgrading get a clear error from both layers — vacuum lints `Unknown config field "model" for dispatcher "ai-proxy"` against the JSON schema, and the runtime `serde(deny_unknown_fields)` rejects leftover nested `model:` at WASM instance load. Requests that omit `model` (or send an empty string / non-string value) now get `400 problem+json` with `code: "model_required"` and `type: "urn:barbacane:error:model_required"`.
+- **plugin**: `cel` `on_match` now rejects unknown fields at config-load time (`deny_unknown_fields`). Previously a typo like `on_match: { set_contxt: {...} }` was silently dropped — and crucially, the `on_match.deny` block proposed in early ADR-0030 drafts was silently dropped against any pre-existing `cel` deployment, which is the bug that motivated this PR. Operators with typos in `on_match` will now see a clear deserialization error instead of a silently broken policy.
 - **plugin**: `ai-token-limit` config now uses `quota` + `window` (seconds) — aligned with the `rate-limit` plugin — instead of `max_tokens_per_minute` / `max_tokens_per_hour`. For multiple concurrent windows (e.g. per-minute and per-hour caps), stack two instances of the middleware with different `policy_name`s.
 - **plugin**: AI guard/limit plugins (`ai-prompt-guard`, `ai-token-limit`, `ai-response-guard`) **fail-closed** on misconfiguration — a missing `default_profile` or invalid regex in a profile returns `500 problem+json` instead of silently letting traffic through. A silently disabled PII rule is precisely the class of bug operators only catch from an incident.
 - **plugin**: `ai-token-limit` now persists the resolved partition key into context between `on_request` and `on_response` (scoped by `policy_name`) so `client_ip` and `header:*` partition sources charge the same bucket the request was admitted against. Previously token consumption leaked into a shared `"unknown"` bucket, effectively disabling per-consumer budgeting for those partition sources.
@@ -49,6 +55,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 - **gateway**: dispatcher plugins now receive the middleware chain's accumulated context — previously `host_context_get` calls inside a dispatcher (e.g. `ai-proxy` reading `ai.target` written by `cel`) returned nothing because the dispatcher instance was started with an empty context. This also means context keys *written* by a dispatcher (e.g. `ai.prompt_tokens` from `ai-proxy`) now flow into the `on_response` middleware chain, which is what makes `ai-cost-tracker` and `ai-token-limit` actually see token usage.
 - **gateway**: stale framing headers (`content-length`, `transfer-encoding`, `connection`, `keep-alive`) from upstream responses are stripped before returning to the client so `on_response` middleware that mutates the body (e.g. `ai-response-guard` PII redaction) doesn't cause `IncompleteMessage` errors from a length mismatch.
+- **plugin**: `ai-proxy` no longer returns `404 Not Found` when the operation is bound to a path other than `/v1/chat/completions`. The path-based dispatch added in PR-1 was too strict — operators are free to bind `ai-proxy` to any operation path, and the dispatcher routes Chat Completions requests through unchanged.
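+
+For the caller-owned-model change under **Changed** above, migration is a pure deletion. An illustrative flat `ai-proxy` config, before and after (the `api_key` line is an assumed neighbour, shown only for context):
+
+```yaml
+# 0.6.x (rejected in 0.7.0 by the vacuum lint and by serde deny_unknown_fields)
+provider: openai
+api_key: env://OPENAI_API_KEY
+model: gpt-4o-mini     # <- delete this line; clients now send "model" in the request body
+
+# 0.7.0
+provider: openai
+api_key: env://OPENAI_API_KEY
+```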
 
 ## [0.6.3] - 2026-04-07
 
@@ -602,7 +609,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Comprehensive documentation
 - GitHub Actions CI
 
-[Unreleased]: https://github.com/barbacane-dev/Barbacane/compare/v0.4.1...HEAD
+[Unreleased]: https://github.com/barbacane-dev/Barbacane/compare/v0.7.0...HEAD
+[0.7.0]: https://github.com/barbacane-dev/Barbacane/compare/v0.6.3...v0.7.0
+[0.6.3]: https://github.com/barbacane-dev/Barbacane/compare/v0.6.2...v0.6.3
+[0.6.2]: https://github.com/barbacane-dev/Barbacane/compare/v0.6.1...v0.6.2
+[0.6.1]: https://github.com/barbacane-dev/Barbacane/compare/v0.6.0...v0.6.1
+[0.6.0]: https://github.com/barbacane-dev/Barbacane/compare/v0.5.2...v0.6.0
+[0.5.2]: https://github.com/barbacane-dev/Barbacane/compare/v0.5.1...v0.5.2
+[0.5.1]: https://github.com/barbacane-dev/Barbacane/compare/v0.5.0...v0.5.1
+[0.5.0]: https://github.com/barbacane-dev/Barbacane/compare/v0.4.1...v0.5.0
 [0.4.1]: https://github.com/barbacane-dev/Barbacane/compare/v0.4.0...v0.4.1
 [0.4.0]: https://github.com/barbacane-dev/Barbacane/compare/v0.3.1...v0.4.0
 [0.3.1]: https://github.com/barbacane-dev/Barbacane/compare/v0.3.0...v0.3.1

diff --git a/Cargo.lock b/Cargo.lock
index 4111a60..3496921 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -401,7 +401,7 @@ dependencies = [
 
 [[package]]
 name = "barbacane"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "arc-swap",
  "barbacane-compiler",
@@ -436,7 +436,7 @@
 
 [[package]]
 name = "barbacane-compiler"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "chrono",
  "criterion",
@@ -457,7 +457,7 @@
 
 [[package]]
 name = "barbacane-control"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "anyhow",
  "axum 0.8.8",
@@ -490,7 +490,7 @@
 
 [[package]]
 name = "barbacane-plugin-macros"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "quote",
  "syn",
@@ -498,7 +498,7 @@
 
 [[package]]
 name = "barbacane-plugin-sdk"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "barbacane-plugin-macros",
  "base64",
@@ -508,7 +508,7 @@
 
 [[package]]
 name = "barbacane-sigv4"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "hex",
  "hmac",
@@ -517,7 +517,7 @@
 
 [[package]]
 name = "barbacane-telemetry"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "opentelemetry",
  "opentelemetry-otlp",
@@ -531,7 +531,7 @@
 
 [[package]]
 name = "barbacane-test"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "assert_cmd",
  "barbacane-compiler",
@@ -552,7 +552,7 @@
 
 [[package]]
 name = "barbacane-wasm"
-version = "0.6.3"
+version = "0.7.0"
 dependencies = [
  "anyhow",
  "async-nats",

diff --git a/Cargo.toml b/Cargo.toml
index 6cfd715..1e449ae 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,7 +19,7 @@ exclude = [
 resolver = "2"
 
 [workspace.package]
-version = "0.6.3"
+version = "0.7.0"
 edition = "2021"
 license = "AGPL-3.0-only"
 repository = "https://github.com/barbacane-dev/Barbacane"
@@ -139,15 +139,15 @@ assert_cmd = "2"
 predicates = "3"
 
 # Workspace crates (version required for crates.io publishing)
-barbacane-sigv4 = { path = "crates/barbacane-sigv4", version = "0.6.3" }
-barbacane-compiler = { path = "crates/barbacane-compiler", version = "0.6.3" }
-barbacane-plugin-sdk = { path = "crates/barbacane-plugin-sdk", version = "0.6.3" }
-barbacane-plugin-macros = { path = "crates/barbacane-plugin-macros", version = "0.6.3" }
-barbacane-wasm = { path = "crates/barbacane-wasm", version = "0.6.3" }
-barbacane-telemetry = { path = "crates/barbacane-telemetry", version = "0.6.3" }
-barbacane-test = { path = "crates/barbacane-test", version = "0.6.3" }
-barbacane = { path = "crates/barbacane", version = "0.6.3" }
-barbacane-control = { path = "crates/barbacane-control", version = "0.6.3" }
+barbacane-sigv4 = { path = "crates/barbacane-sigv4", version = "0.7.0" }
+barbacane-compiler = { path = "crates/barbacane-compiler", version = "0.7.0" }
+barbacane-plugin-sdk = { path = "crates/barbacane-plugin-sdk", version = "0.7.0" }
+barbacane-plugin-macros = { path = "crates/barbacane-plugin-macros", version = "0.7.0" }
+barbacane-wasm = { path = "crates/barbacane-wasm", version = "0.7.0" }
+barbacane-telemetry = { path = "crates/barbacane-telemetry", version = "0.7.0" }
+barbacane-test = { path = "crates/barbacane-test", version = "0.7.0" }
+barbacane = { path = "crates/barbacane", version = "0.7.0" }
+barbacane-control = { path = "crates/barbacane-control", version = "0.7.0" }
 
 [profile.dev]
 opt-level = 0

diff --git a/docs/guide/spec-configuration.md b/docs/guide/spec-configuration.md
index 5689a05..449ae6e 100644
--- a/docs/guide/spec-configuration.md
+++ b/docs/guide/spec-configuration.md
@@ -78,7 +78,7 @@ plugins:
   http-upstream:
     path: ./plugins/http-upstream.wasm
   jwt-auth:
-    url: https://github.com/barbacane-dev/barbacane/releases/download/v0.6.3/jwt-auth.wasm
+    url: https://github.com/barbacane-dev/barbacane/releases/download/v0.7.0/jwt-auth.wasm
     sha256: abc123...
 ```