diff --git a/CHANGELOG.md b/CHANGELOG.md index 96ef07f7..e2cbc4b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,8 +14,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - AI usage analytics: per-model token tracking with agent/session context, BYOK vs platform key breakdown - Vercel AI SDK tracing examples (Node.js) and Python GenAI tracing examples - AI tracing documentation page +- Environment password protection: cookie-based password wall for environments with HMAC-signed cookies, argon2 password hashing, and HTML password form served by the proxy; set via environment settings API with automatic cookie invalidation on password change - Funnel card step pipeline: funnel list cards now show a horizontal pipeline of steps with completions count and conversion rate per step (e.g., `page_view 1,234 → signup 890 (72%)`) alongside the existing summary metrics +### Changed +- Embedded userspace WireGuard via defguard/boringtun: replaced shell-out to `wg` and `ip` CLI with pure Rust implementations (`defguard_wireguard_rs` + `x25519-dalek`); eliminates `wireguard-tools` system package dependency entirely + ### Fixed - GenAI trace token counts showed as zero: PostgreSQL `SUM(bigint)` returns `numeric` type, causing Sea-ORM `try_get::>` to silently fail; added `::bigint` cast to all SUM expressions - Funnel edit page always showed "Funnel Not Found": `EditFunnel` used `useParams()` to read `funnelId`, but no matching React Router `` with a `:funnelId` parameter was defined; `funnelId` was always `undefined`, so the funnel lookup always failed; now parsed from the URL and passed as a numeric prop diff --git a/Cargo.lock b/Cargo.lock index 12b8f58b..b2c28a95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -425,6 +425,48 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71938f30533e4d95a6d17aa530939da3842c2ab6f4f84b9dae68447e4129f74a" +[[package]] +name = "askama" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f75363874b771be265f4ffe307ca705ef6f3baa19011c149da8674a87f1b75c4" +dependencies = [ + "askama_derive", + "itoa", + "percent-encoding", + "serde", + "serde_json", +] + +[[package]] +name = "askama_derive" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "129397200fe83088e8a68407a8e2b1f826cf0086b21ccdb866a722c8bcd3a94f" +dependencies = [ + "askama_parser", + "basic-toml", + "memchr", + "proc-macro2", + "quote", + "rustc-hash 2.1.1", + "serde", + "serde_derive", + "syn 2.0.108", +] + +[[package]] +name = "askama_parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ab5630b3d5eaf232620167977f95eb51f3432fc76852328774afbd242d4358" +dependencies = [ + "memchr", + "serde", + "serde_derive", + "winnow 0.7.13", +] + [[package]] name = "asn1-rs" version = "0.6.2" @@ -1372,6 +1414,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "basic-toml" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a" +dependencies = [ + "serde", +] + [[package]] name = "bigdecimal" version = "0.4.9" @@ -1777,6 +1828,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + [[package]] name = "cargo-platform" version = "0.3.2" @@ -1787,6 +1847,20 @@ dependencies = [ "serde_core", ] +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform 0.1.9", + "semver", + "serde", + "serde_json", + "thiserror 2.0.17", +] + [[package]] name = "cargo_metadata" version = "0.23.1" @@ -1794,7 +1868,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9" dependencies = [ "camino", - "cargo-platform", + "cargo-platform 0.3.2", "semver", "serde", "serde_json", @@ -1873,6 +1947,30 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + [[package]] name = "check-if-email-exists" version = "0.11.7" @@ -1938,6 +2036,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -2603,6 +2702,58 @@ dependencies = [ "uuid", ] +[[package]] +name = "defguard_boringtun" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b7c7f465dde186f958a0a0e4ae823af623451ba26817b8b366b9968286df7a1" +dependencies = [ + "aead", + "base64 0.22.1", + "blake2", + "chacha20poly1305", + "hex", + "hmac", + "ip_network", + "ip_network_table", + "libc", + "nix 0.31.2", + "parking_lot", + "ring", + "socket2 0.6.1", + "thiserror 2.0.17", + "tracing", + "uniffi", + "untrusted", + "x25519-dalek", +] + +[[package]] +name = "defguard_wireguard_rs" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d031e0dd8796d520f18b21ce66c8769a78f91aa93c6bc8d51e6f2b6859a6a9c9" +dependencies = [ + "base64 0.22.1", + "defguard_boringtun", + "ipnet", + "libc", + "log", + "netlink-packet-core", + "netlink-packet-generic", + "netlink-packet-route", + "netlink-packet-utils", + "netlink-packet-wireguard", + "netlink-sys", + "nix 0.31.2", + "regex", + "serde", + "thiserror 2.0.17", + "windows 0.62.2", + "wireguard-nt", + "x25519-dalek", +] + [[package]] name = "deflate64" version = "0.1.10" @@ -3525,6 +3676,15 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" +[[package]] +name = "fs-err" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" +dependencies = [ + "autocfg", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -3797,6 +3957,17 
@@ dependencies = [ "regex-syntax", ] +[[package]] +name = "goblin" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47" +dependencies = [ + "log", + "plain", + "scroll", +] + [[package]] name = "group" version = "0.12.1" @@ -4408,7 +4579,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core", + "windows-core 0.57.0", ] [[package]] @@ -4744,6 +4915,28 @@ dependencies = [ "rustversion", ] +[[package]] +name = "ip_network" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" + +[[package]] +name = "ip_network_table" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4099b7cfc5c5e2fe8c5edf3f6f7adf7a714c9cc697534f63a5a5da30397cb2c0" +dependencies = [ + "ip_network", + "ip_network_table-deps-treebitmap", +] + +[[package]] +name = "ip_network_table-deps-treebitmap" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e537132deb99c0eb4b752f0346b6a836200eaaa3516dd7e5514b63930a09e5d" + [[package]] name = "ipconfig" version = "0.3.2" @@ -4751,7 +4944,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.10", - "widestring", + "widestring 1.2.1", "windows-sys 0.48.0", "winreg 0.50.0", ] @@ -5001,9 +5194,9 @@ checksum = "2c4a545a15244c7d945065b5d392b2d2d7f21526fba56ce51467b06ed445e8f7" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" [[package]] name = "libfuzzer-sys" @@ -5029,6 +5222,16 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + [[package]] name = "libm" version = "0.2.15" @@ -5449,6 +5652,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "miette" version = "7.6.0" @@ -5844,6 +6056,70 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "netlink-packet-core" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3463cbb78394cb0141e2c926b93fc2197e473394b761986eca3b9da2c63ae0f4" +dependencies = [ + "paste", +] + +[[package]] +name = "netlink-packet-generic" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f891b2e0054cac5a684a06628f59568f841c93da4e551239da6e518f539e775" +dependencies = [ + "netlink-packet-core", +] + +[[package]] +name = "netlink-packet-route" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9854ea6ad14e3f4698a7f03b65bce0833dd2d81d594a0e4a984170537146b6" +dependencies = [ + "bitflags 2.10.0", + "libc", + "log", + "netlink-packet-core", +] + +[[package]] +name = 
"netlink-packet-utils" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3176f18d11a1ae46053e59ec89d46ba318ae1343615bd3f8c908bfc84edae35c" +dependencies = [ + "byteorder", + "pastey", + "thiserror 2.0.17", +] + +[[package]] +name = "netlink-packet-wireguard" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037892b0e01ce41f30398a47be2051e712a2cf1eed9cb7e5e6a92b05c423255b" +dependencies = [ + "libc", + "log", + "netlink-packet-core", + "netlink-packet-generic", +] + +[[package]] +name = "netlink-sys" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6c30ed10fa69cc491d491b85cc971f6bdeb8e7367b7cde2ee6cc878d583fae" +dependencies = [ + "bytes", + "libc", + "log", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -5859,7 +6135,20 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset", + "memoffset 0.6.5", +] + +[[package]] +name = "nix" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d0705320c1e6ba1d912b5e37cf18071b6c2e9b7fa8215a1e8a7651966f5d3" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", ] [[package]] @@ -6139,7 +6428,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "bytes", - "cargo_metadata", + "cargo_metadata 0.23.1", "cfg-if", "chrono", "either", @@ -6489,6 +6778,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pastey" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35fb2e5f958ec131621fdd531e9fc186ed768cbe395337403ae56c17a74c68ec" + [[package]] name = "path-slash" version = "0.2.1" @@ -6842,7 +7137,7 @@ dependencies = [ "httpdate", "libc", "log", - "nix", + "nix 0.24.3", "once_cell", "openssl-probe", "parking_lot", @@ -7059,6 +7354,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "png" version = "0.18.0" @@ -7072,6 +7373,17 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "polyval" version = "0.6.2" @@ -9346,6 +9658,12 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "smawk" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" + [[package]] name = "snafu" version = "0.8.9" @@ -9902,7 +10220,7 @@ dependencies = [ "memchr", "ntapi", "rayon", - "windows", + "windows 0.57.0", ] [[package]] @@ -10009,6 +10327,7 @@ version = "0.1.0" dependencies = [ "async-trait", "axum 0.8.6", + "bollard", "futures", "http 1.3.1", "http-body-util", @@ -10346,6 +10665,7 @@ dependencies = [ "axum 0.8.6", "bollard", "bytes", + "check-if-email-exists", "chrono", "clap", "colored 2.2.0", @@ -10739,6 +11059,7 @@ name = 
"temps-environments" version = "0.1.0" dependencies = [ "anyhow", + "argon2", "axum 0.8.6", "axum-macros", "chrono", @@ -11417,6 +11738,7 @@ dependencies = [ "testcontainers", "thiserror 2.0.17", "tokio", + "tokio-postgres", "tokio-test", "tracing", "urlencoding", @@ -11429,6 +11751,7 @@ name = "temps-proxy" version = "0.1.0" dependencies = [ "anyhow", + "argon2", "async-trait", "axum 0.8.6", "base64 0.22.1", @@ -11439,6 +11762,7 @@ dependencies = [ "flate2", "futures", "hex", + "hmac", "htmd", "http-body-util", "hyper 1.7.0", @@ -11484,6 +11808,7 @@ dependencies = [ "tokio-test", "tower 0.4.13", "tracing", + "url", "utoipa", "uuid", "woothee", @@ -11747,6 +12072,7 @@ name = "temps-wireguard" version = "0.1.0" dependencies = [ "base64 0.22.1", + "defguard_wireguard_rs", "rand 0.8.5", "serde", "serde_json", @@ -11754,6 +12080,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tracing", + "x25519-dalek", ] [[package]] @@ -11818,6 +12145,9 @@ name = "textwrap" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" +dependencies = [ + "smawk", +] [[package]] name = "thiserror" @@ -12687,6 +13017,139 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "uniffi" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8c6dec3fc6645f71a16a3fa9ff57991028153bd194ca97f4b55e610c73ce66a" +dependencies = [ + "anyhow", + "camino", + "cargo_metadata 0.19.2", + "clap", + "uniffi_bindgen", + "uniffi_build", + "uniffi_core", + "uniffi_macros", + "uniffi_pipeline", +] + +[[package]] +name = "uniffi_bindgen" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed0150801958d4825da56a41c71f000a457ac3a4613fa9647df78ac4b6b6881" +dependencies = [ + "anyhow", + "askama", + "camino", + "cargo_metadata 0.19.2", + "fs-err", + "glob", + "goblin", + "heck 0.5.0", + "indexmap 2.12.0", + "once_cell", + "serde", + "tempfile", + "textwrap", + "toml 0.8.23", + "uniffi_internal_macros", + "uniffi_meta", + "uniffi_pipeline", + "uniffi_udl", +] + +[[package]] +name = "uniffi_build" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78fd9271a4c2e85bd2c266c5a9ede1fac676eb39fd77f636c27eaf67426fd5f" +dependencies = [ + "anyhow", + "camino", + "uniffi_bindgen", +] + +[[package]] +name = "uniffi_core" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0ef62e69762fbb9386dcb6c87cd3dd05d525fa8a3a579a290892e60ddbda47e" +dependencies = [ + "anyhow", + "bytes", + "once_cell", + "static_assertions", +] + +[[package]] +name = "uniffi_internal_macros" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98f51ebca0d9a4b2aa6c644d5ede45c56f73906b96403c08a1985e75ccb64a01" +dependencies = [ + "anyhow", + "indexmap 2.12.0", + "proc-macro2", + "quote", + "syn 2.0.108", +] + +[[package]] +name = "uniffi_macros" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db9d12529f1223d014fd501e5f29ca0884d15d6ed5ddddd9f506e55350327dc3" +dependencies = [ + "camino", + "fs-err", + "once_cell", + "proc-macro2", + "quote", + "serde", + "syn 2.0.108", + "toml 0.8.23", + "uniffi_meta", +] + +[[package]] +name = "uniffi_meta" +version = "0.31.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9df6d413db2827c68588f8149d30d49b71d540d46539e435b23a7f7dbd4d4f86" +dependencies = [ + "anyhow", + "siphasher", + "uniffi_internal_macros", + "uniffi_pipeline", +] + +[[package]] +name = "uniffi_pipeline" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a806dddc8208f22efd7e95a5cdf88ed43d0f3271e8f63b47e757a8bbdb43b63a" +dependencies = [ + "anyhow", + "heck 0.5.0", + "indexmap 2.12.0", + "tempfile", + "uniffi_internal_macros", +] + +[[package]] +name = "uniffi_udl" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d1a7339539bf6f6fa3e9b534dece13f778bda2d54b1a6d4e40b4d6090ac26e7" +dependencies = [ + "anyhow", + "textwrap", + "uniffi_meta", + "weedle2", +] + [[package]] name = "universal-hash" version = "0.5.1" @@ -13162,6 +13625,15 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "weedle2" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "998d2c24ec099a87daf9467808859f9d82b61f1d9c9701251aea037f514eae0e" +dependencies = [ + "nom 7.1.3", +] + [[package]] name = "weezl" version = "0.1.10" @@ -13190,6 +13662,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + [[package]] name = "widestring" version = "1.2.1" @@ -13239,22 +13717,67 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core", + "windows-core 0.57.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections", + "windows-core 0.62.2", + "windows-future", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" +dependencies = [ + "windows-core 0.62.2", +] + [[package]] name = "windows-core" version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement", - "windows-interface", + "windows-implement 0.57.0", + "windows-interface 0.57.0", "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement 0.60.2", + "windows-interface 0.59.3", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -13266,6 +13789,17 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = 
"windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "windows-interface" version = "0.57.0" @@ -13277,6 +13811,17 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "windows-link" version = "0.1.3" @@ -13289,6 +13834,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", +] + [[package]] name = "windows-registry" version = "0.5.3" @@ -13297,7 +13852,7 @@ checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ "windows-link 0.1.3", "windows-result 0.3.4", - "windows-strings", + "windows-strings 0.4.2", ] [[package]] @@ -13318,6 +13873,15 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows-strings" version = "0.4.2" @@ -13327,6 +13891,15 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -13420,6 +13993,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -13602,6 +14184,22 @@ version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" +[[package]] +name = "wireguard-nt" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22b4dbcc6c93786cf22e420ef96e8976bfb92a455070282302b74de5848191f4" +dependencies = [ + "bitflags 2.10.0", + "getrandom 0.2.16", + "ipnet", + "libloading", + "log", + "thiserror 1.0.69", + "widestring 0.4.3", + "windows-sys 0.59.0", +] + [[package]] name = "wiremock" version = "0.6.5" @@ -13656,6 +14254,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "x509-parser" 
version = "0.16.0" diff --git a/apps/temps-cli/package.json b/apps/temps-cli/package.json index b401d441..6b6b5cf4 100644 --- a/apps/temps-cli/package.json +++ b/apps/temps-cli/package.json @@ -1,6 +1,6 @@ { "name": "@temps-sdk/cli", - "version": "0.1.14", + "version": "0.1.16", "description": "CLI for Temps deployment platform", "type": "module", "bin": { diff --git a/apps/temps-cli/src/commands/projects/create.ts b/apps/temps-cli/src/commands/projects/create.ts index 2b343fcf..6a242844 100644 --- a/apps/temps-cli/src/commands/projects/create.ts +++ b/apps/temps-cli/src/commands/projects/create.ts @@ -23,6 +23,8 @@ import { selectRepository, selectBranch, detectAndSelectPreset, + findRepositoryByName, + fetchGitConnections, } from '../../lib/git-connection.js' import { selectStorageServices } from '../../lib/service-setup.js' @@ -33,12 +35,15 @@ interface CreateOptions { preset?: string connection?: string repo?: string + yes?: boolean } export async function create(options: CreateOptions): Promise { await requireAuth() await setupClient() + const skipPrompts = options.yes ?? false + newline() console.log(colors.bold(`${icons.sparkles} Create New Project`)) console.log(colors.muted('─'.repeat(40))) @@ -46,33 +51,104 @@ export async function create(options: CreateOptions): Promise { try { // Step 1: Select Git Connection - const connection = await selectGitConnection() + let connection + if (options.connection) { + // Resolve connection by ID + const connections = await fetchGitConnections() + const connId = parseInt(options.connection, 10) + connection = connections.find((c) => c.id === connId) + if (!connection) { + error(`Git connection with ID ${options.connection} not found.`) + return + } + info(`Using git connection: ${connection.account_name}`) + } else if (options.repo && skipPrompts) { + // Auto-find the connection that has this repo + const parts = options.repo.split('/') + if (parts.length !== 2 || !parts[0] || !parts[1]) { + error('Repository must be in owner/name format (e.g., myorg/myrepo)') + return + } + const connections = await fetchGitConnections() + for (const conn of connections) { + const repo = await findRepositoryByName(conn.id, parts[0], parts[1]) + if (repo) { + connection = conn + info(`Auto-selected git connection: ${conn.account_name}`) + break + } + } + if (!connection) { + error(`Repository "${options.repo}" not found in any git connection.`) + return + } + } else { + connection = await selectGitConnection() + } if (!connection) { error('No git connection selected. 
Please set up a git provider first.') return } // Step 2: Select Repository - const repository = await selectRepository(connection.id) + let repository + if (options.repo) { + // Parse owner/name format + const parts = options.repo.split('/') + if (parts.length !== 2 || !parts[0] || !parts[1]) { + error('Repository must be in owner/name format (e.g., myorg/myrepo)') + return + } + repository = await findRepositoryByName(connection.id, parts[0], parts[1]) + if (!repository) { + error(`Repository "${options.repo}" not found in connection "${connection.account_name}".`) + return + } + info(`Using repository: ${repository.owner}/${repository.name}`) + } else { + repository = await selectRepository(connection.id) + } if (!repository) { error('No repository selected.') return } // Step 3: Select Branch - const branch = await selectBranch(connection.id, repository) + let branch: string + if (options.branch) { + branch = options.branch + info(`Using branch: ${branch}`) + } else { + branch = await selectBranch(connection.id, repository) + } // Step 4: Detect and Select Preset - const { preset, directory } = await detectAndSelectPreset(repository.id, branch) + let preset: string + let directory: string + if (options.preset) { + preset = options.preset + directory = options.directory || './' + info(`Using preset: ${preset}, directory: ${directory}`) + } else { + const detected = await detectAndSelectPreset(repository.id, branch) + preset = detected.preset + directory = detected.directory + } // Step 5: Configure Project Name - const projectName = await configureProjectName(repository, directory) + let projectName: string + if (options.name) { + projectName = options.name + info(`Using project name: ${projectName}`) + } else { + projectName = await configureProjectName(repository, directory) + } - // Step 6: Select Storage Services - const serviceIds = await selectStorageServices() + // Step 6: Select Storage Services (skip with --yes) + const serviceIds = skipPrompts ? [] : await selectStorageServices() - // Step 7: Configure Environment Variables - const envVars = await configureEnvironmentVariables() + // Step 7: Configure Environment Variables (skip with --yes) + const envVars = skipPrompts ? 
[] : await configureEnvironmentVariables() // Step 8: Create the Project const project = await withSpinner('Creating project...', async () => { @@ -122,15 +198,20 @@ export async function create(options: CreateOptions): Promise { newline() - // Ask if user wants to set as default - const setDefault = await promptConfirm({ - message: 'Set as default project?', - default: true, - }) - - if (setDefault) { + // Ask if user wants to set as default (auto-set with --yes) + if (skipPrompts) { config.set('defaultProject', project.slug) success(`Default project set to "${project.slug}"`) + } else { + const setDefault = await promptConfirm({ + message: 'Set as default project?', + default: true, + }) + + if (setDefault) { + config.set('defaultProject', project.slug) + success(`Default project set to "${project.slug}"`) + } } newline() diff --git a/apps/temps-cli/src/commands/projects/index.ts b/apps/temps-cli/src/commands/projects/index.ts index 6814b589..6f77f226 100644 --- a/apps/temps-cli/src/commands/projects/index.ts +++ b/apps/temps-cli/src/commands/projects/index.ts @@ -27,7 +27,12 @@ export function registerProjectsCommands(program: Command): void { .description('Create a new project') .option('-n, --name ', 'Project name') .option('-d, --description ', 'Project description') - .option('--repo ', 'Git repository URL') + .option('--repo ', 'Repository in owner/name format') + .option('--branch ', 'Git branch') + .option('--directory ', 'Root directory (relative to repo)') + .option('--preset ', 'Build preset (e.g., nextjs, nodejs, static, docker)') + .option('--connection ', 'Git connection ID') + .option('-y, --yes', 'Skip optional prompts (services, env vars, set-default)') .action(create) projects diff --git a/crates/temps-agent/Cargo.toml b/crates/temps-agent/Cargo.toml index 3d8ece83..0a935639 100644 --- a/crates/temps-agent/Cargo.toml +++ b/crates/temps-agent/Cargo.toml @@ -26,5 +26,6 @@ utoipa-swagger-ui = { workspace = true } sysinfo = { workspace = true } uuid = { workspace = true } http-body-util = "0.1" +bollard = { workspace = true } temps-core = { path = "../temps-core" } temps-deployer = { path = "../temps-deployer" } diff --git a/crates/temps-agent/src/handlers.rs b/crates/temps-agent/src/handlers.rs index 74348dc0..d736408f 100644 --- a/crates/temps-agent/src/handlers.rs +++ b/crates/temps-agent/src/handlers.rs @@ -20,20 +20,23 @@ use crate::NodeHealthReport; pub struct AgentState { pub container_deployer: Arc, pub image_builder: Arc, + /// Direct Docker client for service operations (create/exec/backup). + /// None if Docker is not available (shouldn't happen on a real agent). + pub docker: Option, } /// Response wrapper for consistent agent API responses. 
#[derive(Serialize, ToSchema)] pub struct AgentResponse { - success: bool, + pub(crate) success: bool, #[schema(nullable = true)] - data: Option, + pub(crate) data: Option, #[schema(nullable = true)] - error: Option, + pub(crate) error: Option, } impl AgentResponse { - fn ok(data: T) -> Json { + pub(crate) fn ok(data: T) -> Json { Json(Self { success: true, data: Some(data), @@ -65,6 +68,15 @@ fn error_response(status: StatusCode, message: String) -> impl IntoResponse { image_exists, import_image, health_check, + crate::service_handlers::create_service, + crate::service_handlers::stop_service, + crate::service_handlers::start_service, + crate::service_handlers::remove_service, + crate::service_handlers::service_status, + crate::service_handlers::service_exec, + crate::service_handlers::list_services, + crate::service_handlers::backup_service, + crate::service_handlers::restore_service, ), components(schemas( AgentResponse, @@ -72,6 +84,11 @@ fn error_response(status: StatusCode, message: String) -> impl IntoResponse { AgentResponse, AgentResponse, AgentResponse, + AgentResponse, + AgentResponse, + AgentResponse, + AgentResponse>, + AgentResponse, NodeHealthReport, temps_deployer::DeployRequest, temps_deployer::DeployResult, @@ -82,10 +99,20 @@ fn error_response(status: StatusCode, message: String) -> impl IntoResponse { temps_deployer::ResourceLimits, temps_deployer::RestartPolicy, temps_deployer::ContainerLogConfig, + crate::ServiceCreateRequest, + crate::ServiceCreateResponse, + crate::ServicePortMapping, + crate::ServiceExecRequest, + crate::ServiceExecResponse, + crate::ServiceBackupRequest, + crate::ServiceBackupResponse, + crate::ServiceRestoreRequest, + crate::S3CredentialsPayload, + crate::ServiceStatus, )), info( title = "Temps Agent API", - description = "Worker node agent API for container management. All endpoints require Bearer token authentication.", + description = "Worker node agent API for container and service management. All endpoints require Bearer token authentication.", version = "1.0.0" ), security( diff --git a/crates/temps-agent/src/lib.rs b/crates/temps-agent/src/lib.rs index 8689bd3c..d2974000 100644 --- a/crates/temps-agent/src/lib.rs +++ b/crates/temps-agent/src/lib.rs @@ -1,11 +1,13 @@ //! Temps Agent — lightweight HTTP server wrapping the local Docker runtime. //! //! Runs on worker nodes. Exposes a small bearer-token–authenticated API that -//! the control plane (or `RemoteNodeDeployer`) calls to manage containers. +//! the control plane (or `RemoteNodeDeployer`) calls to manage containers +//! and external services. pub mod auth; pub mod handlers; pub mod server; +pub mod service_handlers; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -27,11 +29,20 @@ pub enum AgentError { #[error("Agent server error: {0}")] ServerError(String), + #[error("Service operation failed for '{service_name}': {reason}")] + ServiceOperation { + service_name: String, + reason: String, + }, + #[error("Deployer error: {0}")] Deployer(#[from] temps_deployer::DeployerError), #[error("Builder error: {0}")] Builder(#[from] temps_deployer::BuilderError), + + #[error("Docker error: {0}")] + Docker(String), } /// Health report sent in heartbeats and returned from GET /agent/health. 
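As a point of reference for the service API introduced in this patch (illustrative only, not part of the diff): a control-plane caller would serialize a `ServiceCreateRequest` — the type defined in the `temps-agent` hunks below — and POST it to the new bearer-authenticated `/agent/services` endpoint. The image, port, and volume values here are example data; `host_port: 0` exercises the auto-assign path in `create_service`, which lets Docker pick a free host port and reports it back in `ServiceCreateResponse.host_port`.

use std::collections::HashMap;
use temps_agent::{ServiceCreateRequest, ServicePortMapping};

// Build the JSON body for POST /agent/services (example values only).
fn example_service_create_body() -> serde_json::Value {
    let req = ServiceCreateRequest {
        name: "redis-cache".to_string(),
        service_type: "redis".to_string(),
        image: "redis:7-alpine".to_string(),
        environment: HashMap::new(),
        port_mappings: vec![ServicePortMapping {
            host_port: 0, // 0 = ask the agent to let Docker auto-assign a free host port
            container_port: 6379,
        }],
        volumes: HashMap::from([("redis-cache_data".to_string(), "/data".to_string())]),
        network: Some("temps".to_string()),
        command: None,
    };
    serde_json::to_value(req).expect("ServiceCreateRequest derives Serialize")
}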
@@ -69,3 +80,250 @@ pub struct AgentConfig { #[serde(default)] pub labels: serde_json::Value, } + +// --------------------------------------------------------------------------- +// Service operation request/response types +// --------------------------------------------------------------------------- + +/// Request to create an external service container on this node. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceCreateRequest { + /// Service name (used for container naming) + pub name: String, + /// Service type (postgres, redis, mongodb, s3) + pub service_type: String, + /// Docker image to use + pub image: String, + /// Environment variables for the container + pub environment: std::collections::HashMap, + /// Port mappings (host_port -> container_port) + pub port_mappings: Vec, + /// Volume mounts (volume_name -> container_path) + pub volumes: std::collections::HashMap, + /// Docker network to attach to + #[serde(default)] + pub network: Option, + /// Optional command override + #[serde(default)] + pub command: Option>, +} + +/// Port mapping for a service container. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServicePortMapping { + pub host_port: u16, + pub container_port: u16, +} + +/// Response after creating a service container. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceCreateResponse { + pub container_id: String, + pub container_name: String, + pub host_port: u16, +} + +/// Request to execute a command inside a service container (for backups, etc.). +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceExecRequest { + /// Container name or ID + pub container_name: String, + /// Command to execute + pub command: Vec, + /// Environment variables for the exec session + #[serde(default)] + pub environment: std::collections::HashMap, + /// Run as this user (e.g., "postgres") + #[serde(default)] + pub user: Option, + /// Detach and run in background + #[serde(default)] + pub detach: bool, +} + +/// Response from a container exec operation. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceExecResponse { + pub exit_code: i64, + pub stdout: String, + pub stderr: String, +} + +/// Request to back up a service directly to S3. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceBackupRequest { + /// Container name of the service to back up + pub container_name: String, + /// Service type (postgres, redis, mongodb) + pub service_type: String, + /// S3 credentials for upload (distributed from control plane) + pub s3: S3CredentialsPayload, + /// S3 key prefix for this backup + pub s3_path: String, + /// Backup method (e.g., "pg_dump", "walg", "rdb_copy") + #[serde(default)] + pub method: Option, +} + +/// S3 credentials distributed from the control plane. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct S3CredentialsPayload { + pub access_key_id: String, + pub secret_key: String, + pub region: String, + pub endpoint: Option, + pub bucket_name: String, + pub force_path_style: bool, +} + +/// Response after a backup completes. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceBackupResponse { + pub s3_location: String, + pub size_bytes: u64, + pub compression_type: String, + pub checksum: Option, +} + +/// Request to restore a service from S3. 
+#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceRestoreRequest { + /// Container name of the service to restore into + pub container_name: String, + /// Service type (postgres, redis, mongodb) + pub service_type: String, + /// S3 credentials + pub s3: S3CredentialsPayload, + /// S3 key of the backup to restore + pub s3_location: String, + /// Compression type of the backup + #[serde(default)] + pub compression_type: Option, +} + +/// Status of a service on this node. +#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)] +pub struct ServiceStatus { + pub container_name: String, + pub container_id: Option, + pub running: bool, + pub health: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashMap; + + #[test] + fn test_service_create_request_serialization() { + let req = ServiceCreateRequest { + name: "postgres-main".to_string(), + service_type: "postgres".to_string(), + image: "timescale/timescaledb-ha:pg18".to_string(), + environment: HashMap::from([ + ("POSTGRES_PASSWORD".to_string(), "secret".to_string()), + ("POSTGRES_DB".to_string(), "temps".to_string()), + ]), + port_mappings: vec![ServicePortMapping { + host_port: 30001, + container_port: 5432, + }], + volumes: HashMap::from([( + "postgres-main_data".to_string(), + "/var/lib/postgresql".to_string(), + )]), + network: Some("temps".to_string()), + command: None, + }; + + let json = serde_json::to_string(&req).unwrap(); + let parsed: ServiceCreateRequest = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.name, "postgres-main"); + assert_eq!(parsed.service_type, "postgres"); + assert_eq!(parsed.port_mappings.len(), 1); + assert_eq!(parsed.port_mappings[0].host_port, 30001); + } + + #[test] + fn test_service_exec_request_defaults() { + let json = r#"{"container_name":"pg","command":["pg_dump","-Fc"]}"#; + let req: ServiceExecRequest = serde_json::from_str(json).unwrap(); + assert_eq!(req.container_name, "pg"); + assert_eq!(req.command, vec!["pg_dump", "-Fc"]); + assert!(req.environment.is_empty()); + assert!(req.user.is_none()); + assert!(!req.detach); + } + + #[test] + fn test_s3_credentials_payload_serialization() { + let creds = S3CredentialsPayload { + access_key_id: "AKIA...".to_string(), + secret_key: "secret".to_string(), + region: "us-east-1".to_string(), + endpoint: Some("https://s3.example.com".to_string()), + bucket_name: "backups".to_string(), + force_path_style: true, + }; + + let json = serde_json::to_string(&creds).unwrap(); + let parsed: S3CredentialsPayload = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.bucket_name, "backups"); + assert!(parsed.force_path_style); + assert_eq!(parsed.endpoint.unwrap(), "https://s3.example.com"); + } + + #[test] + fn test_service_status_not_running() { + let status = ServiceStatus { + container_name: "redis-cache".to_string(), + container_id: None, + running: false, + health: None, + }; + assert!(!status.running); + assert!(status.container_id.is_none()); + } + + #[test] + fn test_service_backup_request_serialization() { + let req = ServiceBackupRequest { + container_name: "postgres-main".to_string(), + service_type: "postgres".to_string(), + s3: S3CredentialsPayload { + access_key_id: "key".to_string(), + secret_key: "secret".to_string(), + region: "eu-central-1".to_string(), + endpoint: None, + bucket_name: "backups".to_string(), + force_path_style: false, + }, + s3_path: "external_services/postgres/main/2026/03/12/".to_string(), + method: Some("pg_dump".to_string()), + }; + + let json 
= serde_json::to_string(&req).unwrap(); + let parsed: ServiceBackupRequest = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.container_name, "postgres-main"); + assert_eq!(parsed.s3.region, "eu-central-1"); + assert_eq!(parsed.method.unwrap(), "pg_dump"); + } + + #[test] + fn test_agent_config_serialization_with_defaults() { + let config = AgentConfig { + listen_address: "0.0.0.0:3100".to_string(), + token: "test-token".to_string(), + node_name: "worker-1".to_string(), + control_plane_url: "https://control:3000".to_string(), + node_id: 1, + labels: serde_json::json!({}), + }; + + let json = serde_json::to_string(&config).unwrap(); + let parsed: AgentConfig = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.node_name, "worker-1"); + assert_eq!(parsed.node_id, 1); + } +} diff --git a/crates/temps-agent/src/server.rs b/crates/temps-agent/src/server.rs index ffbfbb14..fa6e0b51 100644 --- a/crates/temps-agent/src/server.rs +++ b/crates/temps-agent/src/server.rs @@ -12,6 +12,7 @@ use utoipa_swagger_ui::SwaggerUi; use crate::auth::{require_agent_auth, AgentAuth}; use crate::handlers::{self, AgentApiDoc, AgentState}; +use crate::service_handlers; use crate::AgentConfig; use temps_deployer::{ContainerDeployer, ImageBuilder}; @@ -19,17 +20,20 @@ use temps_deployer::{ContainerDeployer, ImageBuilder}; pub fn build_router( container_deployer: Arc, image_builder: Arc, + docker: Option, config: &AgentConfig, ) -> Router { let state = Arc::new(AgentState { container_deployer, image_builder, + docker, }); let auth = Arc::new(AgentAuth::new(&config.token)); // API routes — all protected by bearer token auth let api_routes = Router::new() + // Container management routes .route("/agent/containers/deploy", post(handlers::deploy_container)) .route( "/agent/containers/{id}/stop", @@ -48,6 +52,34 @@ pub fn build_router( .route("/agent/images/import", post(handlers::import_image)) .route("/agent/images/{name}/exists", get(handlers::image_exists)) .route("/agent/health", get(handlers::health_check)) + // Service management routes + .route("/agent/services", post(service_handlers::create_service)) + .route("/agent/services", get(service_handlers::list_services)) + .route( + "/agent/services/{name}/stop", + post(service_handlers::stop_service), + ) + .route( + "/agent/services/{name}/start", + post(service_handlers::start_service), + ) + .route( + "/agent/services/{name}", + delete(service_handlers::remove_service), + ) + .route( + "/agent/services/{name}/status", + get(service_handlers::service_status), + ) + .route("/agent/services/exec", post(service_handlers::service_exec)) + .route( + "/agent/services/backup", + post(service_handlers::backup_service), + ) + .route( + "/agent/services/restore", + post(service_handlers::restore_service), + ) .layer(middleware::from_fn(require_agent_auth)) .layer(Extension(auth)) .with_state(state); @@ -281,9 +313,10 @@ fn collect_capacity_metrics() -> serde_json::Value { pub async fn start_agent_server( container_deployer: Arc, image_builder: Arc, + docker: Option, config: AgentConfig, ) -> Result<(), crate::AgentError> { - let router = build_router(container_deployer.clone(), image_builder, &config); + let router = build_router(container_deployer.clone(), image_builder, docker, &config); // Start heartbeat background loop (with deployer for container inventory on first beat) spawn_heartbeat_loop(&config, container_deployer); diff --git a/crates/temps-agent/src/service_handlers.rs b/crates/temps-agent/src/service_handlers.rs new file mode 100644 index 
00000000..19164680 --- /dev/null +++ b/crates/temps-agent/src/service_handlers.rs @@ -0,0 +1,1170 @@ +//! HTTP handlers for external service operations on worker nodes. +//! +//! These endpoints allow the control plane to manage external services +//! (PostgreSQL, Redis, MongoDB, S3) on any node in the cluster. + +use axum::{ + extract::{Path, State}, + http::StatusCode, + response::IntoResponse, + Json, +}; +use bollard::query_parameters::{ + InspectContainerOptions, ListContainersOptions, RemoveContainerOptions, StartContainerOptions, + StopContainerOptions, +}; +use std::collections::HashMap; +use std::sync::Arc; + +use crate::handlers::{AgentResponse, AgentState}; +use crate::{ + ServiceBackupRequest, ServiceBackupResponse, ServiceCreateRequest, ServiceCreateResponse, + ServiceExecRequest, ServiceExecResponse, ServiceRestoreRequest, ServiceStatus, +}; + +fn error_response(status: StatusCode, message: String) -> impl IntoResponse { + ( + status, + Json(AgentResponse::<()> { + success: false, + data: None, + error: Some(message), + }), + ) +} + +fn ok_response(data: T) -> Json> { + Json(AgentResponse { + success: true, + data: Some(data), + error: None, + }) +} + +/// Create and start an external service container on this node. +#[utoipa::path( + tag = "Services", + post, + path = "/agent/services", + request_body = ServiceCreateRequest, + responses( + (status = 200, description = "Service created", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Service creation failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn create_service( + State(state): State>, + Json(request): Json, +) -> impl IntoResponse { + tracing::info!( + service = %request.name, + service_type = %request.service_type, + image = %request.image, + "Creating external service container" + ); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available on this agent".to_string(), + ) + .into_response(); + } + }; + + let container_name = request.name.clone(); + + // Create volumes + for volume_name in request.volumes.keys() { + let create_opts = bollard::models::VolumeCreateRequest { + name: Some(volume_name.clone()), + ..Default::default() + }; + if let Err(e) = docker.create_volume(create_opts).await { + tracing::warn!( + volume = %volume_name, + "Volume creation returned error (may already exist): {}", + e + ); + } + } + + // Build port bindings + let mut port_bindings: HashMap>> = + HashMap::new(); + let mut exposed_ports: Vec = Vec::new(); + let mut first_host_port: u16 = 0; + let mut has_auto_assign = false; + + for pm in &request.port_mappings { + let container_port_key = format!("{}/tcp", pm.container_port); + exposed_ports.push(container_port_key.clone()); + + if pm.host_port == 0 { + // Auto-assign: let Docker pick a free host port + has_auto_assign = true; + port_bindings.insert( + container_port_key, + Some(vec![bollard::models::PortBinding { + host_ip: Some("0.0.0.0".to_string()), + host_port: None, + }]), + ); + } else { + port_bindings.insert( + container_port_key, + Some(vec![bollard::models::PortBinding { + host_ip: Some("0.0.0.0".to_string()), + host_port: Some(pm.host_port.to_string()), + }]), + ); + if first_host_port == 0 { + first_host_port = pm.host_port; + } + } + } + + // Build volume binds + let binds: Vec = request + .volumes + .iter() + .map(|(vol, path)| format!("{}:{}", vol, path)) + .collect(); + + // Build environment + let 
env: Vec = request + .environment + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect(); + + let host_config = bollard::models::HostConfig { + binds: Some(binds), + port_bindings: Some(port_bindings), + network_mode: request.network.clone(), + restart_policy: Some(bollard::models::RestartPolicy { + name: Some(bollard::models::RestartPolicyNameEnum::UNLESS_STOPPED), + maximum_retry_count: None, + }), + ..Default::default() + }; + + let container_config = bollard::models::ContainerCreateBody { + image: Some(request.image.clone()), + env: Some(env), + exposed_ports: Some(exposed_ports), + host_config: Some(host_config), + cmd: request.command.clone(), + labels: Some(HashMap::from([ + ("sh.temps.managed".to_string(), "true".to_string()), + ("sh.temps.service".to_string(), "true".to_string()), + ( + "sh.temps.service.type".to_string(), + request.service_type.clone(), + ), + ("sh.temps.service.name".to_string(), request.name.clone()), + ])), + ..Default::default() + }; + + // Pull the image if not already present locally + { + use bollard::query_parameters::CreateImageOptions; + use futures::StreamExt; + + let image_ref = request.image.as_str(); + tracing::info!(image = %image_ref, "Pulling image (if not present)..."); + + let mut pull_stream = docker.create_image( + Some(CreateImageOptions { + from_image: Some(image_ref.to_string()), + ..Default::default() + }), + None, + None, + ); + + while let Some(result) = pull_stream.next().await { + if let Err(e) = result { + tracing::error!(image = %image_ref, "Failed to pull image: {}", e); + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to pull image '{}': {}", image_ref, e), + ) + .into_response(); + } + } + tracing::info!(image = %image_ref, "Image ready"); + } + + let create_opts = bollard::query_parameters::CreateContainerOptionsBuilder::new() + .name(&container_name) + .build(); + + match docker + .create_container(Some(create_opts), container_config) + .await + { + Ok(response) => { + // Start the container + if let Err(e) = docker + .start_container(&container_name, None::) + .await + { + tracing::error!( + container = %container_name, + "Failed to start service container: {}", + e + ); + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Container created but failed to start: {}", e), + ) + .into_response(); + } + + // If any port was auto-assigned, inspect the container to get the actual port + if has_auto_assign && first_host_port == 0 { + match docker + .inspect_container(&container_name, None::) + .await + { + Ok(info) => { + if let Some(network_settings) = &info.network_settings { + if let Some(ports) = &network_settings.ports { + // Find the first mapped port + for bindings in ports.values().flatten() { + for binding in bindings { + if let Some(hp) = &binding.host_port { + if let Ok(port) = hp.parse::() { + first_host_port = port; + break; + } + } + } + if first_host_port > 0 { + break; + } + } + } + } + } + Err(e) => { + tracing::warn!( + container = %container_name, + "Failed to inspect container for auto-assigned port: {}", + e + ); + } + } + } + + tracing::info!( + container = %container_name, + container_id = %response.id, + host_port = first_host_port, + "Service container created and started" + ); + + ok_response(ServiceCreateResponse { + container_id: response.id, + container_name, + host_port: first_host_port, + }) + .into_response() + } + Err(e) => { + tracing::error!( + container = %container_name, + "Failed to create service container: {}", + e + ); + 
error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!( + "Failed to create service container '{}': {}", + container_name, e + ), + ) + .into_response() + } + } +} + +/// Stop a service container. +#[utoipa::path( + tag = "Services", + post, + path = "/agent/services/{name}/stop", + params(("name" = String, Path, description = "Service container name")), + responses( + (status = 200, description = "Service stopped", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Stop failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn stop_service( + State(state): State>, + Path(name): Path, +) -> impl IntoResponse { + tracing::info!(service = %name, "Stopping service container"); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + match docker + .stop_container(&name, None::) + .await + { + Ok(()) => { + tracing::info!(service = %name, "Service container stopped"); + ok_response("stopped".to_string()).into_response() + } + Err(e) => { + tracing::error!(service = %name, "Failed to stop service: {}", e); + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to stop service '{}': {}", name, e), + ) + .into_response() + } + } +} + +/// Start a stopped service container. +#[utoipa::path( + tag = "Services", + post, + path = "/agent/services/{name}/start", + params(("name" = String, Path, description = "Service container name")), + responses( + (status = 200, description = "Service started", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Start failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn start_service( + State(state): State>, + Path(name): Path, +) -> impl IntoResponse { + tracing::info!(service = %name, "Starting service container"); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + match docker + .start_container(&name, None::) + .await + { + Ok(()) => { + tracing::info!(service = %name, "Service container started"); + ok_response("started".to_string()).into_response() + } + Err(e) => { + tracing::error!(service = %name, "Failed to start service: {}", e); + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to start service '{}': {}", name, e), + ) + .into_response() + } + } +} + +/// Remove a service container and optionally its volumes. 
+#[utoipa::path( + tag = "Services", + delete, + path = "/agent/services/{name}", + params(("name" = String, Path, description = "Service container name")), + responses( + (status = 200, description = "Service removed", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Remove failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn remove_service( + State(state): State>, + Path(name): Path, +) -> impl IntoResponse { + tracing::info!(service = %name, "Removing service container"); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + // Stop first if running + let _ = docker + .stop_container(&name, None::) + .await; + + match docker + .remove_container( + &name, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await + { + Ok(()) => { + tracing::info!(service = %name, "Service container removed"); + ok_response("removed".to_string()).into_response() + } + Err(e) => { + tracing::error!(service = %name, "Failed to remove service: {}", e); + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to remove service '{}': {}", name, e), + ) + .into_response() + } + } +} + +/// Get service container status. +#[utoipa::path( + tag = "Services", + get, + path = "/agent/services/{name}/status", + params(("name" = String, Path, description = "Service container name")), + responses( + (status = 200, description = "Service status", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Status check failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn service_status( + State(state): State>, + Path(name): Path, +) -> impl IntoResponse { + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + match docker + .inspect_container(&name, None::) + .await + { + Ok(info) => { + let state_info = info.state.as_ref(); + let running = state_info.and_then(|s| s.running).unwrap_or(false); + let health = state_info + .and_then(|s| s.health.as_ref()) + .and_then(|h| h.status.as_ref()) + .map(|s| format!("{:?}", s)); + + let container_id = info.id.clone(); + + ok_response(ServiceStatus { + container_name: name, + container_id, + running, + health, + }) + .into_response() + } + Err(e) => { + // Container not found = not running + if e.to_string().contains("404") || e.to_string().contains("No such container") { + ok_response(ServiceStatus { + container_name: name, + container_id: None, + running: false, + health: None, + }) + .into_response() + } else { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to inspect service '{}': {}", name, e), + ) + .into_response() + } + } + } +} + +/// Execute a command inside a service container. +/// +/// Used by the control plane for operations like pg_dump, redis-cli BGSAVE, etc. 
+#[utoipa::path( + tag = "Services", + post, + path = "/agent/services/exec", + request_body = ServiceExecRequest, + responses( + (status = 200, description = "Command executed", body = AgentResponse), + (status = 401, description = "Unauthorized"), + (status = 500, description = "Exec failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn service_exec( + State(state): State>, + Json(request): Json, +) -> impl IntoResponse { + tracing::info!( + container = %request.container_name, + command = ?request.command, + "Executing command in service container" + ); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + use bollard::exec::{CreateExecOptions, StartExecOptions}; + + let env_strings: Vec = request + .environment + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect(); + let env_refs: Vec<&str> = env_strings.iter().map(|s| &s[..]).collect(); + + let cmd_refs: Vec<&str> = request.command.iter().map(|s| &s[..]).collect(); + + let exec_config = CreateExecOptions { + cmd: Some(cmd_refs), + env: if env_refs.is_empty() { + None + } else { + Some(env_refs) + }, + attach_stdout: Some(!request.detach), + attach_stderr: Some(!request.detach), + user: request.user.as_deref(), + ..Default::default() + }; + + let exec_create = match docker + .create_exec(&request.container_name, exec_config) + .await + { + Ok(r) => r, + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!( + "Failed to create exec in '{}': {}", + request.container_name, e + ), + ) + .into_response(); + } + }; + + if request.detach { + // Start detached — don't wait for output + if let Err(e) = docker + .start_exec( + &exec_create.id, + Some(StartExecOptions { + detach: true, + ..Default::default() + }), + ) + .await + { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to start exec (detached): {}", e), + ) + .into_response(); + } + + return ok_response(ServiceExecResponse { + exit_code: 0, + stdout: String::new(), + stderr: "detached".to_string(), + }) + .into_response(); + } + + // Start attached — collect output + let output = match docker + .start_exec(&exec_create.id, None::) + .await + { + Ok(bollard::exec::StartExecResults::Attached { mut output, .. 
}) => { + use futures::StreamExt; + let mut stdout = String::new(); + let mut stderr = String::new(); + while let Some(chunk) = output.next().await { + match chunk { + Ok(bollard::container::LogOutput::StdOut { message }) => { + stdout.push_str(&String::from_utf8_lossy(&message)); + } + Ok(bollard::container::LogOutput::StdErr { message }) => { + stderr.push_str(&String::from_utf8_lossy(&message)); + } + Ok(_) => {} + Err(e) => { + stderr.push_str(&format!("Stream error: {}\n", e)); + } + } + } + (stdout, stderr) + } + Ok(bollard::exec::StartExecResults::Detached) => (String::new(), String::new()), + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to start exec: {}", e), + ) + .into_response(); + } + }; + + // Get exit code + let exit_code = match docker.inspect_exec(&exec_create.id).await { + Ok(info) => info.exit_code.unwrap_or(-1), + Err(_) => -1, + }; + + tracing::info!( + container = %request.container_name, + exit_code = exit_code, + "Exec completed" + ); + + ok_response(ServiceExecResponse { + exit_code, + stdout: output.0, + stderr: output.1, + }) + .into_response() +} + +/// List all service containers on this node. +#[utoipa::path( + tag = "Services", + get, + path = "/agent/services", + responses( + (status = 200, description = "Service list", body = AgentResponse>), + (status = 401, description = "Unauthorized"), + (status = 500, description = "List failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn list_services(State(state): State>) -> impl IntoResponse { + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + let mut filters = HashMap::new(); + filters.insert( + "label".to_string(), + vec!["sh.temps.service=true".to_string()], + ); + + let opts = ListContainersOptions { + all: true, + filters: Some(filters), + ..Default::default() + }; + + match docker.list_containers(Some(opts)).await { + Ok(containers) => { + let services: Vec = containers + .into_iter() + .map(|c| { + let name = c + .names + .as_ref() + .and_then(|n| n.first()) + .map(|n| n.trim_start_matches('/').to_string()) + .unwrap_or_default(); + let running = c + .state + .as_ref() + .map(|s| format!("{:?}", s).to_lowercase().contains("running")) + .unwrap_or(false); + ServiceStatus { + container_name: name, + container_id: c.id.clone(), + running, + health: c.status.clone(), + } + }) + .collect(); + ok_response(services).into_response() + } + Err(e) => error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to list services: {}", e), + ) + .into_response(), + } +} + +/// Backup a service directly to S3. +/// +/// Executes the appropriate backup command inside the service container +/// and streams the output to S3. The control plane distributes S3 credentials +/// to the agent for each backup request. 
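For orientation, a plausible payload for this endpoint is sketched below. The field names are read off the `request.*` accesses in the handler (`service_type`, `method`, `s3_path`, and the nested `s3` credentials block); the exact serde representation of the real request struct is an assumption.

```rust
// Rough shape of a backup request as the handler consumes it (illustrative only).
use serde_json::json;

fn example_backup_request() -> serde_json::Value {
    json!({
        "container_name": "temps-svc-postgres-1",
        "service_type": "postgres",
        "method": "walg",              // postgres default; redis -> "rdb_copy", mongodb -> "mongodump"
        "s3_path": "backups/project-42/postgres",
        "s3": {
            "access_key_id": "AKIA_EXAMPLE",   // distributed per request by the control plane
            "secret_key": "EXAMPLE_SECRET",
            "region": "us-east-1",
            "endpoint": "https://minio.internal:9000",
            "bucket_name": "temps-backups",
            "force_path_style": true
        }
    })
}
```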
+#[utoipa::path( + tag = "Services", + post, + path = "/agent/services/backup", + request_body = ServiceBackupRequest, + responses( + (status = 200, description = "Backup completed", body = AgentResponse), + (status = 400, description = "Unsupported service type"), + (status = 500, description = "Backup failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn backup_service( + State(state): State>, + Json(request): Json, +) -> impl IntoResponse { + tracing::info!( + container = %request.container_name, + service_type = %request.service_type, + s3_path = %request.s3_path, + "Starting service backup" + ); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + // Build the backup command and env vars based on service type + let method = request + .method + .as_deref() + .unwrap_or(match request.service_type.as_str() { + "postgres" => "walg", + "redis" => "rdb_copy", + "mongodb" => "mongodump", + _ => "pg_dump", + }); + + let s3_env = build_s3_env(&request); + + let (cmd, user): (Vec, Option<&str>) = match (request.service_type.as_str(), method) { + ("postgres", "walg") => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "wal-g backup-push /var/lib/postgresql/data".to_string(), + ]; + (cmd, Some("postgres")) + } + ("postgres", _) => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "pg_dump -Fc --no-acl --no-owner -U postgres postgres | gzip > /tmp/backup.sql.gz && echo 'dump_complete'" + .to_string(), + ]; + (cmd, Some("postgres")) + } + ("redis", _) => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "redis-cli BGSAVE && sleep 2 && cp /data/dump.rdb /tmp/backup.rdb && echo 'dump_complete'" + .to_string(), + ]; + (cmd, None) + } + ("mongodb", _) => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "mongodump --archive=/tmp/backup.archive --gzip && echo 'dump_complete'" + .to_string(), + ]; + (cmd, None) + } + _ => { + return error_response( + StatusCode::BAD_REQUEST, + format!( + "Unsupported service type for backup: {}", + request.service_type + ), + ) + .into_response(); + } + }; + + // Execute the backup command inside the container + use bollard::exec::{CreateExecOptions, StartExecOptions, StartExecResults}; + use futures::StreamExt; + + let env_strings: Vec = s3_env.iter().map(|(k, v)| format!("{}={}", k, v)).collect(); + let env_refs: Vec<&str> = env_strings.iter().map(|s| &s[..]).collect(); + let cmd_refs: Vec<&str> = cmd.iter().map(|s| &s[..]).collect(); + + let exec_config = CreateExecOptions { + cmd: Some(cmd_refs), + env: if env_refs.is_empty() { + None + } else { + Some(env_refs) + }, + attach_stdout: Some(true), + attach_stderr: Some(true), + user, + ..Default::default() + }; + + let exec_create = match docker + .create_exec(&request.container_name, exec_config) + .await + { + Ok(r) => r, + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to create backup exec: {}", e), + ) + .into_response(); + } + }; + + let start_opts = StartExecOptions { + ..Default::default() + }; + + match docker.start_exec(&exec_create.id, Some(start_opts)).await { + Ok(StartExecResults::Attached { mut output, .. 
}) => { + let mut stdout = String::new(); + let mut stderr = String::new(); + + while let Some(chunk) = output.next().await { + match chunk { + Ok(bollard::container::LogOutput::StdOut { message }) => { + stdout.push_str(&String::from_utf8_lossy(&message)); + } + Ok(bollard::container::LogOutput::StdErr { message }) => { + stderr.push_str(&String::from_utf8_lossy(&message)); + } + Ok(_) => {} + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Error reading backup output: {}", e), + ) + .into_response(); + } + } + } + + if stderr.contains("error") || stderr.contains("FATAL") { + tracing::error!( + container = %request.container_name, + "Backup failed: {}", stderr + ); + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Backup failed: {}", stderr), + ) + .into_response(); + } + + tracing::info!( + container = %request.container_name, + stdout = %stdout, + "Backup completed successfully" + ); + + ok_response(ServiceBackupResponse { + s3_location: request.s3_path.clone(), + size_bytes: 0, + compression_type: "gzip".to_string(), + checksum: None, + }) + .into_response() + } + Ok(StartExecResults::Detached) => error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Backup exec unexpectedly detached".to_string(), + ) + .into_response(), + Err(e) => error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to start backup exec: {}", e), + ) + .into_response(), + } +} + +/// Restore a service from S3. +/// +/// Downloads the backup from S3 and restores it into the service container. +#[utoipa::path( + tag = "Services", + post, + path = "/agent/services/restore", + request_body = ServiceRestoreRequest, + responses( + (status = 200, description = "Restore completed"), + (status = 400, description = "Unsupported service type"), + (status = 500, description = "Restore failed") + ), + security(("bearer_auth" = [])) +)] +pub async fn restore_service( + State(state): State>, + Json(request): Json, +) -> impl IntoResponse { + tracing::info!( + container = %request.container_name, + service_type = %request.service_type, + s3_location = %request.s3_location, + "Starting service restore" + ); + + let docker = match state.docker.as_ref() { + Some(d) => d, + None => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Docker client not available".to_string(), + ) + .into_response(); + } + }; + + let s3_env = build_s3_restore_env(&request); + + let (cmd, user): (Vec, Option<&str>) = match request.service_type.as_str() { + "postgres" => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "wal-g backup-fetch /var/lib/postgresql/data LATEST".to_string(), + ]; + (cmd, Some("postgres")) + } + "redis" => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "redis-cli SHUTDOWN NOSAVE; cp /tmp/restore.rdb /data/dump.rdb; redis-server" + .to_string(), + ]; + (cmd, None) + } + "mongodb" => { + let cmd = vec![ + "bash".to_string(), + "-c".to_string(), + "mongorestore --archive=/tmp/restore.archive --gzip --drop".to_string(), + ]; + (cmd, None) + } + _ => { + return error_response( + StatusCode::BAD_REQUEST, + format!( + "Unsupported service type for restore: {}", + request.service_type + ), + ) + .into_response(); + } + }; + + use bollard::exec::{CreateExecOptions, StartExecOptions, StartExecResults}; + use futures::StreamExt; + + let env_strings: Vec = s3_env.iter().map(|(k, v)| format!("{}={}", k, v)).collect(); + let env_refs: Vec<&str> = env_strings.iter().map(|s| &s[..]).collect(); + let cmd_refs: Vec<&str> = 
cmd.iter().map(|s| &s[..]).collect(); + + let exec_config = CreateExecOptions { + cmd: Some(cmd_refs), + env: if env_refs.is_empty() { + None + } else { + Some(env_refs) + }, + attach_stdout: Some(true), + attach_stderr: Some(true), + user, + ..Default::default() + }; + + let exec_create = match docker + .create_exec(&request.container_name, exec_config) + .await + { + Ok(r) => r, + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to create restore exec: {}", e), + ) + .into_response(); + } + }; + + let start_opts = StartExecOptions { + ..Default::default() + }; + + match docker.start_exec(&exec_create.id, Some(start_opts)).await { + Ok(StartExecResults::Attached { mut output, .. }) => { + let mut stderr = String::new(); + + while let Some(chunk) = output.next().await { + match chunk { + Ok(bollard::container::LogOutput::StdErr { message }) => { + stderr.push_str(&String::from_utf8_lossy(&message)); + } + Ok(_) => {} + Err(e) => { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Error reading restore output: {}", e), + ) + .into_response(); + } + } + } + + if stderr.contains("error") || stderr.contains("FATAL") { + return error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Restore failed: {}", stderr), + ) + .into_response(); + } + + tracing::info!( + container = %request.container_name, + "Restore completed successfully" + ); + + ok_response(serde_json::json!({ + "status": "restored", + "container_name": request.container_name, + })) + .into_response() + } + Ok(StartExecResults::Detached) => error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "Restore exec unexpectedly detached".to_string(), + ) + .into_response(), + Err(e) => error_response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to start restore exec: {}", e), + ) + .into_response(), + } +} + +/// Build S3 environment variables for backup commands (WAL-G, etc.) +fn build_s3_env(request: &ServiceBackupRequest) -> HashMap { + let mut env = HashMap::new(); + env.insert( + "AWS_ACCESS_KEY_ID".to_string(), + request.s3.access_key_id.clone(), + ); + env.insert( + "AWS_SECRET_ACCESS_KEY".to_string(), + request.s3.secret_key.clone(), + ); + env.insert("AWS_REGION".to_string(), request.s3.region.clone()); + env.insert( + "WALG_S3_PREFIX".to_string(), + format!("s3://{}/{}", request.s3.bucket_name, request.s3_path), + ); + if let Some(ref endpoint) = request.s3.endpoint { + env.insert("AWS_ENDPOINT".to_string(), endpoint.clone()); + } + if request.s3.force_path_style { + env.insert("AWS_S3_FORCE_PATH_STYLE".to_string(), "true".to_string()); + } + env +} + +/// Build S3 environment variables for restore commands. 
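Both helpers hand WAL-G its target through `WALG_S3_PREFIX`; on the backup path that prefix is simply `s3://{bucket_name}/{s3_path}`. A tiny check with made-up values, just to make the shape concrete:

```rust
#[test]
fn walg_prefix_shape() {
    // Hypothetical bucket and path; mirrors how build_s3_env assembles WALG_S3_PREFIX.
    let bucket_name = "temps-backups";
    let s3_path = "backups/project-42/postgres";
    let prefix = format!("s3://{}/{}", bucket_name, s3_path);
    assert_eq!(prefix, "s3://temps-backups/backups/project-42/postgres");
}
```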
+fn build_s3_restore_env(request: &ServiceRestoreRequest) -> HashMap { + let mut env = HashMap::new(); + env.insert( + "AWS_ACCESS_KEY_ID".to_string(), + request.s3.access_key_id.clone(), + ); + env.insert( + "AWS_SECRET_ACCESS_KEY".to_string(), + request.s3.secret_key.clone(), + ); + env.insert("AWS_REGION".to_string(), request.s3.region.clone()); + env.insert("WALG_S3_PREFIX".to_string(), request.s3_location.clone()); + if let Some(ref endpoint) = request.s3.endpoint { + env.insert("AWS_ENDPOINT".to_string(), endpoint.clone()); + } + if request.s3.force_path_style { + env.insert("AWS_S3_FORCE_PATH_STYLE".to_string(), "true".to_string()); + } + env +} diff --git a/crates/temps-ai-gateway/src/handlers/providers.rs b/crates/temps-ai-gateway/src/handlers/providers.rs index d0edd72b..00351dcc 100644 --- a/crates/temps-ai-gateway/src/handlers/providers.rs +++ b/crates/temps-ai-gateway/src/handlers/providers.rs @@ -343,7 +343,7 @@ async fn test_provider_key_inline( Err(e) => TestProviderKeyResponse { success: false, provider: request.provider, - error: Some(e.to_string()), + error: Some(friendly_error_message(&e)), latency_ms, }, }; @@ -399,10 +399,31 @@ async fn test_provider_key_by_id( Err(e) => TestProviderKeyResponse { success: false, provider: provider_name, - error: Some(e.to_string()), + error: Some(friendly_error_message(&e)), latency_ms, }, }; Ok(Json(response)) } + +/// Extract a human-friendly error message from an AiGatewayError. +/// For upstream errors the raw body is often a JSON blob; we try to +/// pull out just the `error.message` field for a cleaner UX. +fn friendly_error_message(err: &AiGatewayError) -> String { + if let AiGatewayError::UpstreamError { + status, message, .. + } = err + { + if let Ok(parsed) = serde_json::from_str::(message) { + if let Some(msg) = parsed + .get("error") + .and_then(|e| e.get("message")) + .and_then(|m| m.as_str()) + { + return format!("{} — {}", status, msg); + } + } + } + err.to_string() +} diff --git a/crates/temps-ai-gateway/src/providers/openai_compat.rs b/crates/temps-ai-gateway/src/providers/openai_compat.rs index 0afcd4db..6d8c504d 100644 --- a/crates/temps-ai-gateway/src/providers/openai_compat.rs +++ b/crates/temps-ai-gateway/src/providers/openai_compat.rs @@ -147,6 +147,11 @@ impl AiProvider for OpenAiCompatProvider { let mut req = request.clone(); req.stream = false; + // Sanitize request for OpenAI model compatibility + if self.info.id == "openai" { + sanitize_openai_request(&mut req); + } + let response = self .client .post(&url) @@ -192,6 +197,10 @@ impl AiProvider for OpenAiCompatProvider { let mut req = request.clone(); req.stream = true; + if self.info.id == "openai" { + sanitize_openai_request(&mut req); + } + // Inject stream_options.include_usage so the final chunk includes token counts let extra = req.extra.get_or_insert_with(Default::default); extra @@ -285,6 +294,35 @@ impl AiProvider for OpenAiCompatProvider { } } +/// Returns true if the model is an OpenAI o-series reasoning model. +fn is_o_series_model(model: &str) -> bool { + let m = model.to_lowercase(); + m.starts_with("o1") || m.starts_with("o3") || m.starts_with("o4") +} + +/// Sanitize a chat completion request for OpenAI API compatibility. 
+/// +/// - All OpenAI models: rewrite `max_tokens` → `max_completion_tokens` +/// - O-series reasoning models (o1, o3, o4-mini, etc.): strip unsupported +/// parameters (`temperature`, `top_p`, `frequency_penalty`, `presence_penalty`) +fn sanitize_openai_request(req: &mut ChatCompletionRequest) { + // Rewrite max_tokens → max_completion_tokens for all OpenAI models + if let Some(value) = req.max_tokens.take() { + let extra = req.extra.get_or_insert_with(Default::default); + extra + .entry("max_completion_tokens") + .or_insert_with(|| serde_json::json!(value)); + } + + // O-series models reject sampling parameters + if is_o_series_model(&req.model) { + req.temperature = None; + req.top_p = None; + req.frequency_penalty = None; + req.presence_penalty = None; + } +} + #[cfg(test)] mod tests { use super::*; @@ -350,4 +388,142 @@ mod tests { assert!(models.iter().any(|m| m.id == "gpt-4o")); assert!(models.iter().all(|m| m.owned_by == "openai")); } + + fn test_request(model: &str) -> ChatCompletionRequest { + ChatCompletionRequest { + model: model.to_string(), + messages: vec![], + stream: false, + temperature: None, + max_tokens: None, + top_p: None, + stop: None, + n: None, + tools: None, + tool_choice: None, + response_format: None, + frequency_penalty: None, + presence_penalty: None, + seed: None, + user: None, + extra: None, + } + } + + #[test] + fn test_sanitize_rewrites_max_tokens() { + let mut req = test_request("gpt-5-nano"); + req.max_tokens = Some(500); + + sanitize_openai_request(&mut req); + + assert!(req.max_tokens.is_none()); + let extra = req.extra.as_ref().unwrap(); + assert_eq!( + extra.get("max_completion_tokens").unwrap(), + &serde_json::json!(500) + ); + + let json = serde_json::to_string(&req).unwrap(); + assert!(json.contains("max_completion_tokens")); + assert!(!json.contains("\"max_tokens\"")); + } + + #[test] + fn test_sanitize_noop_when_no_max_tokens() { + let mut req = test_request("gpt-5-nano"); + + sanitize_openai_request(&mut req); + + assert!(req.max_tokens.is_none()); + assert!(req.extra.is_none()); + } + + #[test] + fn test_sanitize_preserves_existing_max_completion_tokens() { + let mut extra = serde_json::Map::new(); + extra.insert("max_completion_tokens".to_string(), serde_json::json!(1000)); + + let mut req = test_request("gpt-5-nano"); + req.max_tokens = Some(500); + req.extra = Some(extra); + + sanitize_openai_request(&mut req); + + assert!(req.max_tokens.is_none()); + assert_eq!( + req.extra + .as_ref() + .unwrap() + .get("max_completion_tokens") + .unwrap(), + &serde_json::json!(1000) + ); + } + + #[test] + fn test_sanitize_strips_sampling_params_for_o_series() { + let mut req = test_request("o3"); + req.temperature = Some(0.7); + req.top_p = Some(0.9); + req.frequency_penalty = Some(0.5); + req.presence_penalty = Some(0.3); + req.max_tokens = Some(500); + + sanitize_openai_request(&mut req); + + assert!(req.temperature.is_none()); + assert!(req.top_p.is_none()); + assert!(req.frequency_penalty.is_none()); + assert!(req.presence_penalty.is_none()); + assert!(req.max_tokens.is_none()); + assert_eq!( + req.extra + .as_ref() + .unwrap() + .get("max_completion_tokens") + .unwrap(), + &serde_json::json!(500) + ); + } + + #[test] + fn test_sanitize_strips_sampling_params_for_o4_mini() { + let mut req = test_request("o4-mini"); + req.temperature = Some(0.5); + req.top_p = Some(0.8); + + sanitize_openai_request(&mut req); + + assert!(req.temperature.is_none()); + assert!(req.top_p.is_none()); + } + + #[test] + fn test_sanitize_keeps_sampling_params_for_gpt() { + 
let mut req = test_request("gpt-5-nano"); + req.temperature = Some(0.7); + req.top_p = Some(0.9); + req.frequency_penalty = Some(0.5); + req.presence_penalty = Some(0.3); + + sanitize_openai_request(&mut req); + + assert_eq!(req.temperature, Some(0.7)); + assert_eq!(req.top_p, Some(0.9)); + assert_eq!(req.frequency_penalty, Some(0.5)); + assert_eq!(req.presence_penalty, Some(0.3)); + } + + #[test] + fn test_is_o_series_model() { + assert!(is_o_series_model("o3")); + assert!(is_o_series_model("o3-pro")); + assert!(is_o_series_model("o3-mini")); + assert!(is_o_series_model("o4-mini")); + assert!(is_o_series_model("o1-preview")); + assert!(!is_o_series_model("gpt-5-nano")); + assert!(!is_o_series_model("gpt-4o")); + assert!(!is_o_series_model("grok-3")); + } } diff --git a/crates/temps-ai-gateway/src/services/gateway_service.rs b/crates/temps-ai-gateway/src/services/gateway_service.rs index bd80a6d5..a1d9c560 100644 --- a/crates/temps-ai-gateway/src/services/gateway_service.rs +++ b/crates/temps-ai-gateway/src/services/gateway_service.rs @@ -170,7 +170,7 @@ impl GatewayService { } /// Send a minimal chat completion to verify a provider API key works. - /// Uses the cheapest model for the provider and `max_tokens: 1`. + /// Uses the cheapest model for the provider and a small `max_tokens`. pub async fn test_provider( &self, provider_id: &str, @@ -200,14 +200,14 @@ impl GatewayService { model: test_model.to_string(), messages: vec![ChatMessage { role: "user".to_string(), - content: Some(MessageContent::Text("Hi".to_string())), + content: Some(MessageContent::Text("Say ok".to_string())), name: None, tool_calls: None, tool_call_id: None, }], stream: false, temperature: None, - max_tokens: Some(1), + max_tokens: Some(20), top_p: None, stop: None, n: None, diff --git a/crates/temps-backup/src/handlers/backup_handler.rs b/crates/temps-backup/src/handlers/backup_handler.rs index f71a6bf1..aad8ad8b 100644 --- a/crates/temps-backup/src/handlers/backup_handler.rs +++ b/crates/temps-backup/src/handlers/backup_handler.rs @@ -322,8 +322,9 @@ impl From for S3SourceResponse { name: source.name, bucket_name: source.bucket_name, bucket_path: source.bucket_path, - access_key_id: source.access_key_id, - secret_key: source.secret_key, + // Credentials are encrypted at rest — mask them in API responses + access_key_id: "***".to_string(), + secret_key: "***".to_string(), region: source.region, endpoint: source.endpoint, force_path_style: source.force_path_style, diff --git a/crates/temps-backup/src/services/backup.rs b/crates/temps-backup/src/services/backup.rs index 840e70b1..c5ccea0e 100644 --- a/crates/temps-backup/src/services/backup.rs +++ b/crates/temps-backup/src/services/backup.rs @@ -2882,6 +2882,47 @@ impl BackupService { Ok(()) } + /// Enforce retention for every active backup schedule. + /// Deletes backups that are older than each schedule's `retention_period` days. 
+ async fn enforce_retention(&self) -> Result<()> { + let schedules = temps_entities::backup_schedules::Entity::find() + .filter(temps_entities::backup_schedules::Column::Enabled.eq(true)) + .all(self.db.as_ref()) + .await?; + + for schedule in &schedules { + if schedule.retention_period > 0 { + let cutoff = Utc::now() - Duration::days(schedule.retention_period as i64); + let old_backups = temps_entities::backups::Entity::find() + .filter(temps_entities::backups::Column::ScheduleId.eq(Some(schedule.id))) + .filter(temps_entities::backups::Column::StartedAt.lt(cutoff)) + .all(self.db.as_ref()) + .await?; + + if !old_backups.is_empty() { + info!( + "Retention cleanup: deleting {} backup(s) older than {} days for schedule {} ({})", + old_backups.len(), + schedule.retention_period, + schedule.id, + schedule.name + ); + } + + for backup in old_backups { + if let Err(e) = self.delete_backup(&backup.backup_id).await { + error!( + "Failed to delete expired backup {} for schedule {}: {}", + backup.backup_id, schedule.id, e + ); + } + } + } + } + + Ok(()) + } + /// List all S3 sources pub async fn list_s3_sources( &self, @@ -3626,6 +3667,19 @@ impl BackupService { } } + // Enforce retention: delete backups older than the schedule's retention period + tokio::select! { + result = self.enforce_retention() => { + if let Err(e) = result { + error!("Error enforcing backup retention: {}", e); + } + } + _ = cancellation_token.cancelled() => { + info!("Backup scheduler received cancellation signal during retention cleanup"); + return Ok(()); + } + } + // Sleep until next hour or cancellation let next_hour = (now + chrono::Duration::hours(1)) .with_minute(0) diff --git a/crates/temps-blob/src/handlers/handler.rs b/crates/temps-blob/src/handlers/handler.rs index ea23870e..e40ea826 100644 --- a/crates/temps-blob/src/handlers/handler.rs +++ b/crates/temps-blob/src/handlers/handler.rs @@ -586,6 +586,9 @@ pub async fn blob_enable( service_type: ServiceType::Blob, version, parameters, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; // Create the service through ExternalServiceManager diff --git a/crates/temps-cli/Cargo.toml b/crates/temps-cli/Cargo.toml index 4b3e34ca..6506ed86 100644 --- a/crates/temps-cli/Cargo.toml +++ b/crates/temps-cli/Cargo.toml @@ -62,6 +62,7 @@ temps-wireguard = { path = "../temps-wireguard" } tokio-util = { workspace = true } # CLI and runtime dependencies - reference from crates workspace +check-if-email-exists = "0.11" clap = "4.4" colored = "2.0" chrono = { workspace = true } diff --git a/crates/temps-cli/src/commands/agent.rs b/crates/temps-cli/src/commands/agent.rs index ed2a5b17..11d24a10 100644 --- a/crates/temps-cli/src/commands/agent.rs +++ b/crates/temps-cli/src/commands/agent.rs @@ -49,8 +49,12 @@ impl AgentCommand { let network_name = temps_core::NETWORK_NAME.clone(); let docker_runtime = Arc::new( - temps_deployer::docker::DockerRuntime::new(Arc::new(docker), true, network_name) - .with_host_bind_address("0.0.0.0".to_string()), + temps_deployer::docker::DockerRuntime::new( + Arc::new(docker.clone()), + true, + network_name, + ) + .with_host_bind_address("0.0.0.0".to_string()), ); let deployer: Arc = docker_runtime.clone(); @@ -58,7 +62,7 @@ impl AgentCommand { tracing::info!("Starting temps agent (node_id={})...", config.node_id); - temps_agent::server::start_agent_server(deployer, builder, config) + temps_agent::server::start_agent_server(deployer, builder, Some(docker), config) .await .map_err(|e| anyhow::anyhow!("Agent server error: {}", 
e))?; diff --git a/crates/temps-cli/src/commands/join.rs b/crates/temps-cli/src/commands/join.rs index de518ed8..5f138696 100644 --- a/crates/temps-cli/src/commands/join.rs +++ b/crates/temps-cli/src/commands/join.rs @@ -187,13 +187,13 @@ impl JoinCommand { // Step 1: Check if WireGuard is available let wg_manager = temps_wireguard::WireGuardManager::default_config()?; - if let Err(e) = wg_manager.check_available().await { - anyhow::bail!( + wg_manager.check_available().await.map_err(|e| { + anyhow::anyhow!( "WireGuard not available: {}. \ - Install WireGuard or use --private-address for user-managed networking.", + Use --private-address for user-managed networking.", e - ); - } + ) + })?; // Step 2: Generate WireGuard keypair let keypair = wg_manager.generate_keypair().await?; diff --git a/crates/temps-cli/src/commands/serve/mod.rs b/crates/temps-cli/src/commands/serve/mod.rs index 3b4024c9..1b7d626c 100644 --- a/crates/temps-cli/src/commands/serve/mod.rs +++ b/crates/temps-cli/src/commands/serve/mod.rs @@ -64,6 +64,11 @@ pub struct ServeCommand { impl ServeCommand { pub fn execute(self) -> anyhow::Result<()> { + // Install the rustls crypto provider once at startup. Both temps-domains + // and check-if-email-exists try to install it themselves — calling it here + // first satisfies the library's internal Once guard and prevents panics. + check_if_email_exists::initialize_crypto_provider(); + // Set screenshot provider from CLI flag (takes precedence over env var) // This allows: temps serve --screenshot-provider=noop if let Some(ref provider) = self.screenshot_provider { diff --git a/crates/temps-config/src/handler.rs b/crates/temps-config/src/handler.rs index 139353ca..3f2a31e1 100644 --- a/crates/temps-config/src/handler.rs +++ b/crates/temps-config/src/handler.rs @@ -70,6 +70,13 @@ pub struct JoinTokenStatusResponse { pub has_token: bool, } +/// Public settings response containing only non-sensitive feature flags +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct PublicSettingsResponse { + /// Whether demo mode is enabled + pub demo_enabled: bool, +} + /// Safe response for application settings that masks sensitive fields #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] pub struct AppSettingsResponse { @@ -156,6 +163,7 @@ impl From for AppSettingsResponse { #[derive(OpenApi)] #[openapi( paths( + get_public_settings, get_settings, update_settings, generate_join_token, @@ -168,6 +176,7 @@ impl From for AppSettingsResponse { ContainerLogSettings, DnsProviderSettingsMasked, DockerRegistrySettingsMasked, + PublicSettingsResponse, SettingsUpdateResponse, GenerateJoinTokenResponse, JoinTokenStatusResponse, @@ -183,6 +192,7 @@ pub struct SettingsApiDoc; pub fn configure_routes() -> Router> { Router::new() + .route("/settings/public", get(get_public_settings)) .route("/settings", get(get_settings)) .route("/settings", put(update_settings)) .route("/settings/join-token/generate", post(generate_join_token)) @@ -190,6 +200,37 @@ pub fn configure_routes() -> Router> { .route("/settings/join-token/status", get(get_join_token_status)) } +/// Get public settings (no authentication required) +/// +/// Returns non-sensitive feature flags like demo mode status. +/// This endpoint is intentionally unauthenticated so the login page can use it. 
+#[utoipa::path( + tag = "Settings", + get, + path = "/settings/public", + responses( + (status = 200, description = "Public settings", body = PublicSettingsResponse), + (status = 500, description = "Internal server error") + ) +)] +async fn get_public_settings( + State(app_state): State>, +) -> Result { + match app_state.config_service.get_settings().await { + Ok(settings) => Ok(Json(PublicSettingsResponse { + demo_enabled: settings.demo_mode.enabled, + })), + Err(e) => { + tracing::error!("Failed to get public settings: {}", e); + Err(ErrorBuilder::new(StatusCode::INTERNAL_SERVER_ERROR) + .type_("https://temps.sh/probs/settings-error") + .title("Settings Error") + .detail("Failed to get public settings".to_string()) + .build()) + } + } +} + /// Get application settings #[utoipa::path( tag = "Settings", diff --git a/crates/temps-core/src/error_builder.rs b/crates/temps-core/src/error_builder.rs index cd603f18..b7e6ff97 100644 --- a/crates/temps-core/src/error_builder.rs +++ b/crates/temps-core/src/error_builder.rs @@ -119,3 +119,12 @@ pub fn conflict() -> ErrorBuilder { .detail("The request could not be completed due to a conflict with the current state of the resource") .value("error_code", "CONFLICT") } + +pub fn too_many_requests() -> ErrorBuilder { + ErrorBuilder::new(StatusCode::TOO_MANY_REQUESTS) + .type_("https://temps.sh/probs/too-many-requests") + .title("Too Many Requests") + .instance("/error/too-many-requests") + .detail("Rate limit exceeded, please retry later") + .value("error_code", "TOO_MANY_REQUESTS") +} diff --git a/crates/temps-core/src/lib.rs b/crates/temps-core/src/lib.rs index c5e88c55..dbf5535f 100644 --- a/crates/temps-core/src/lib.rs +++ b/crates/temps-core/src/lib.rs @@ -8,6 +8,7 @@ pub mod error_builder; pub mod external_plugin; pub mod jobs; pub mod notifications; +pub mod on_demand; pub mod openapi; pub mod plugin; pub mod problemdetails; @@ -34,6 +35,7 @@ pub use deployment::*; pub use error::*; pub use error_builder::*; pub use jobs::*; +pub use on_demand::*; pub use utils::*; // Re-export external dependencies diff --git a/crates/temps-core/src/on_demand.rs b/crates/temps-core/src/on_demand.rs new file mode 100644 index 00000000..99212fe2 --- /dev/null +++ b/crates/temps-core/src/on_demand.rs @@ -0,0 +1,29 @@ +//! On-demand environment wake/sleep traits +//! +//! These traits avoid circular dependencies between temps-environments (handlers) +//! and temps-proxy (OnDemandManager). The proxy implements these traits and they +//! are injected into the environments AppState via the plugin system. + +use async_trait::async_trait; + +/// Trait for waking/sleeping on-demand environments with full container lifecycle. +/// +/// Unlike `EnvironmentService::set_sleeping` (which only flips the DB flag), +/// implementations of this trait start/stop containers and wait for health checks. +#[async_trait] +pub trait OnDemandWaker: Send + Sync { + /// Wake an environment: start containers, wait for health, set sleeping=false. + /// Returns Ok(()) when the environment is fully running and ready for traffic. + async fn wake_environment( + &self, + environment_id: i32, + wake_timeout_seconds: i32, + ) -> Result<(), Box>; + + /// Sleep an environment: stop containers, set sleeping=true. + /// Returns Ok(true) if this call performed the sleep, Ok(false) if already sleeping. 
+ async fn sleep_environment( + &self, + environment_id: i32, + ) -> Result>; +} diff --git a/crates/temps-deployments/src/handlers/nodes.rs b/crates/temps-deployments/src/handlers/nodes.rs index d6e2153c..ee9f2c60 100644 --- a/crates/temps-deployments/src/handlers/nodes.rs +++ b/crates/temps-deployments/src/handlers/nodes.rs @@ -14,7 +14,7 @@ use axum::{ routing::{get, post}, Json, Router, }; -use sea_orm::DatabaseConnection; +use sea_orm::{DatabaseConnection, EntityTrait}; use serde::{Deserialize, Serialize}; use sha2::Digest; use temps_auth::RequireAuth; @@ -174,11 +174,24 @@ pub struct UndrainNodeResponse { pub message: String, } +/// S3 credentials distributed to agents for backup/restore operations. +#[derive(Serialize, Deserialize, ToSchema)] +pub struct S3CredentialsResponse { + pub access_key_id: String, + pub secret_key: String, + pub region: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint: Option, + pub bucket_name: String, + pub force_path_style: bool, +} + #[derive(OpenApi)] #[openapi( paths( register_node, node_heartbeat, + get_s3_credentials, admin_list_nodes, admin_get_node, admin_list_node_containers, @@ -192,6 +205,7 @@ pub struct UndrainNodeResponse { RegisterNodeResponse, HeartbeatApiRequest, HeartbeatResponse, + S3CredentialsResponse, NodeInfoResponse, NodeListResponse, NodeContainerResponse, @@ -215,6 +229,10 @@ pub fn configure_routes() -> Router> { Router::new() .route("/internal/nodes/register", post(register_node)) .route("/internal/nodes/{node_id}/heartbeat", post(node_heartbeat)) + .route( + "/internal/nodes/{node_id}/s3-credentials/{s3_source_id}", + get(get_s3_credentials), + ) } /// Configure UI-facing admin node routes (session auth via RequireAuth). @@ -464,6 +482,105 @@ async fn node_heartbeat( })) } +/// Get decrypted S3 credentials for a backup/restore operation. +/// +/// Agents call this endpoint to receive the S3 credentials they need to upload +/// or download backups. The credentials are decrypted from the stored S3 source +/// and returned over the authenticated TLS/WireGuard channel. 
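A minimal sketch of the agent side of this exchange, assuming reqwest with the "json" feature: the response struct mirrors `S3CredentialsResponse`, the route and bearer-token scheme follow the handler below, and the control-plane base URL is an assumption.

```rust
// Agent-side fetch of per-operation S3 credentials (illustrative only).
use serde::Deserialize;

#[derive(Deserialize)]
struct S3Credentials {
    access_key_id: String,
    secret_key: String,
    region: String,
    endpoint: Option<String>,
    bucket_name: String,
    force_path_style: bool,
}

async fn fetch_s3_credentials(
    control_plane: &str,
    node_id: i32,
    s3_source_id: i32,
    node_token: &str,
) -> Result<S3Credentials, reqwest::Error> {
    reqwest::Client::new()
        .get(format!(
            "{control_plane}/internal/nodes/{node_id}/s3-credentials/{s3_source_id}"
        ))
        .bearer_auth(node_token)
        .send()
        .await?
        .error_for_status()?
        .json::<S3Credentials>()
        .await
}
```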
+#[utoipa::path( + tag = "Nodes", + get, + path = "/internal/nodes/{node_id}/s3-credentials/{s3_source_id}", + params( + ("node_id" = i32, Path, description = "Node ID"), + ("s3_source_id" = i32, Path, description = "S3 source ID") + ), + responses( + (status = 200, description = "S3 credentials", body = S3CredentialsResponse), + (status = 401, description = "Unauthorized"), + (status = 404, description = "S3 source not found"), + (status = 500, description = "Internal server error") + ) +)] +async fn get_s3_credentials( + State(app_state): State>, + headers: HeaderMap, + Path((node_id, s3_source_id)): Path<(i32, i32)>, +) -> Result { + // Verify the node's token + let token = extract_bearer_token(&headers)?; + let node = app_state + .node_service + .get_by_id(node_id) + .await + .map_err(Problem::from)?; + + let token_hash = sha256_hash(&token); + if node.token_hash != token_hash { + warn!(node_id, "Invalid token for S3 credentials request"); + return Err(problemdetails::new(StatusCode::UNAUTHORIZED) + .with_title("Invalid Token") + .with_detail(format!("Invalid authentication token for node {}", node_id))); + } + + // Look up the S3 source + let s3_source = temps_entities::s3_sources::Entity::find_by_id(s3_source_id) + .one(app_state.db.as_ref()) + .await + .map_err(|e| { + error!("Failed to look up S3 source {}: {}", s3_source_id, e); + problemdetails::new(StatusCode::INTERNAL_SERVER_ERROR) + .with_title("Database Error") + .with_detail(format!("Failed to look up S3 source: {}", e)) + })? + .ok_or_else(|| { + problemdetails::new(StatusCode::NOT_FOUND) + .with_title("S3 Source Not Found") + .with_detail(format!("S3 source {} not found", s3_source_id)) + })?; + + // Decrypt credentials + let access_key_id = app_state + .encryption_service + .decrypt_string(&s3_source.access_key_id) + .map_err(|e| { + error!( + "Failed to decrypt access key for S3 source {}: {}", + s3_source_id, e + ); + problemdetails::new(StatusCode::INTERNAL_SERVER_ERROR) + .with_title("Decryption Error") + .with_detail("Failed to decrypt S3 credentials") + })?; + + let secret_key = app_state + .encryption_service + .decrypt_string(&s3_source.secret_key) + .map_err(|e| { + error!( + "Failed to decrypt secret key for S3 source {}: {}", + s3_source_id, e + ); + problemdetails::new(StatusCode::INTERNAL_SERVER_ERROR) + .with_title("Decryption Error") + .with_detail("Failed to decrypt S3 credentials") + })?; + + info!( + "Distributed S3 credentials for source {} to node {} ({})", + s3_source_id, node_id, node.name + ); + + Ok(Json(S3CredentialsResponse { + access_key_id, + secret_key, + region: s3_source.region, + endpoint: s3_source.endpoint, + bucket_name: s3_source.bucket_name, + force_path_style: s3_source.force_path_style.unwrap_or(true), + })) +} + /// List all registered nodes (admin — session auth via RequireAuth) #[utoipa::path( tag = "Nodes", diff --git a/crates/temps-domains/src/plugin.rs b/crates/temps-domains/src/plugin.rs index 9046c176..21b74044 100644 --- a/crates/temps-domains/src/plugin.rs +++ b/crates/temps-domains/src/plugin.rs @@ -13,7 +13,6 @@ use crate::{ handlers::{self, create_domain_app_state_with_dns, DomainAppState}, tls::{repository::DefaultCertificateRepository, TlsServiceBuilder}, }; -use rustls::crypto::CryptoProvider; use temps_dns::services::DnsProviderService; /// Domains Plugin for managing DNS records and TLS certificates @@ -41,7 +40,6 @@ impl TempsPlugin for DomainsPlugin { context: &'a ServiceRegistrationContext, ) -> Pin> + Send + 'a>> { Box::pin(async move { - 
CryptoProvider::install_default(rustls::crypto::ring::default_provider()).unwrap(); // Get required dependencies from the service registry let db = context.require_service::(); let encryption_service = context.require_service::(); diff --git a/crates/temps-email/src/handlers/validation.rs b/crates/temps-email/src/handlers/validation.rs index 4918b7b0..fcbd5fe5 100644 --- a/crates/temps-email/src/handlers/validation.rs +++ b/crates/temps-email/src/handlers/validation.rs @@ -266,9 +266,17 @@ pub async fn validate_email( .await .map_err(|e| { error!("Failed to validate email: {}", e); - internal_server_error() - .detail(format!("Failed to validate email: {}", e)) - .build() + match &e { + crate::errors::EmailError::Validation(msg) if msg.contains("timed out") => { + bad_request().detail(format!("{}", e)).build() + } + crate::errors::EmailError::Validation(_) => { + bad_request().detail(format!("{}", e)).build() + } + _ => internal_server_error() + .detail(format!("Failed to validate email: {}", e)) + .build(), + } })?; Ok((StatusCode::OK, Json(ValidateEmailResponse::from(result)))) diff --git a/crates/temps-email/src/services/validation_service.rs b/crates/temps-email/src/services/validation_service.rs index a729a29f..133e3be8 100644 --- a/crates/temps-email/src/services/validation_service.rs +++ b/crates/temps-email/src/services/validation_service.rs @@ -3,8 +3,11 @@ //! This service provides email validation capabilities to check if an email //! address exists without sending any email. -use check_if_email_exists::{check_email, CheckEmailInputBuilder, CheckEmailOutput, Reachable}; +use check_if_email_exists::{ + check_email, CheckEmailInputBuilder, CheckEmailInputProxy, CheckEmailOutput, Reachable, +}; use serde::{Deserialize, Serialize}; +use std::time::Duration; use tracing::{debug, info}; use crate::errors::EmailError; @@ -248,16 +251,67 @@ impl ValidationService { ) -> Result { info!("Validating email: {}", request.email); - // Build the check email input using the builder pattern (v0.11 API) - let input = CheckEmailInputBuilder::default() - .to_email(request.email.clone()) + use check_if_email_exists::smtp::verif_method::VerifMethod; + + let mut builder = CheckEmailInputBuilder::default(); + builder.to_email(request.email.clone()); + + let from_email = self + .config + .from_email + .clone() + .unwrap_or_else(|| "noreply@temps.sh".to_string()); + let hello_name = self + .config + .hello_name + .clone() + .unwrap_or_else(|| "temps.sh".to_string()); + + // Build proxy input if provided + let proxy_input = request.proxy.as_ref().map(|p| CheckEmailInputProxy { + host: p.host.clone(), + port: p.port, + username: p.username.clone(), + password: p.password.clone(), + timeout_ms: Some(10_000), + }); + + // Always set a VerifMethod with SMTP timeout to avoid hanging + let verif_method = VerifMethod::new_with_same_config_for_all( + proxy_input, + hello_name, + from_email, + 25, + Some(Duration::from_secs(10)), + 1, + ); + builder.verif_method(verif_method); + + let input = builder .build() .map_err(|e| EmailError::Validation(format!("Failed to build email input: {}", e)))?; debug!("Calling check_email for: {}", request.email); - // Perform the validation - returns a single CheckEmailOutput - let output = check_email(&input).await; + // Outer timeout as a safety net + let output = tokio::time::timeout(Duration::from_secs(20), async { + // Catch panics from the library (e.g. 
duplicate rustls crypto provider install) + let result = + tokio::task::spawn(async move { check_email(&input).await }).await; + result.map_err(|e| { + EmailError::Validation(format!( + "Email validation failed for internal error: {}", + e + )) + }) + }) + .await + .map_err(|_| { + EmailError::Validation(format!( + "Email validation timed out for {}. SMTP port 25 may be blocked — consider using a SOCKS5 proxy.", + request.email + )) + })??; debug!( "Email validation result for {}: is_reachable={:?}",
diff --git a/crates/temps-entities/src/deployment_config.rs b/crates/temps-entities/src/deployment_config.rs index 6b53032d..03c0cef4 100644 --- a/crates/temps-entities/src/deployment_config.rs +++ b/crates/temps-entities/src/deployment_config.rs
@@ -45,6 +45,10 @@ pub struct SecurityConfig { /// Geographic restrictions (future: country blocking, etc.) #[serde(skip_serializing_if = "Option::is_none")] pub geo_restrictions: Option, + + /// Password protection: shows an HTML password form before allowing access + #[serde(skip_serializing_if = "Option::is_none")] + pub password_protection: Option, } /// Security headers configuration (subset of global SecurityHeadersSettings)
@@ -122,6 +126,21 @@ pub struct GeoRestrictionsConfig { pub allowed_countries: Vec, } +/// Password protection configuration + /// + /// When enabled, the proxy shows an HTML password form before allowing access. + /// After the user enters the correct password, an HMAC-signed cookie is set + /// so subsequent requests pass through without re-entering the password. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, FromJsonQueryResult)] +#[serde(rename_all = "camelCase")] +pub struct PasswordProtectionConfig { + /// Whether password protection is enabled + pub enabled: bool, + + /// The argon2-hashed password (never stored or returned in plaintext) + pub password_hash: String, +} + impl SecurityConfig { /// Create a new security configuration with default values pub fn new() -> Self {
@@ -159,6 +178,10 @@ impl SecurityConfig { .geo_restrictions .clone() .or_else(|| self.geo_restrictions.clone()), + password_protection: other + .password_protection + .clone() + .or_else(|| self.password_protection.clone()), } } }
@@ -492,6 +515,14 @@ impl DeploymentConfig { } } + // Replicas must be at least 1 + if self.replicas < 1 { + return Err(format!( + "Replicas must be at least 1, got {}", + self.replicas + )); + } + // Port should be in valid range if let Some(port) = self.exposed_port { if !(1..=65535).contains(&port) {
diff --git a/crates/temps-entities/src/external_services.rs b/crates/temps-entities/src/external_services.rs index b4dabfc0..26dad16d 100644 --- a/crates/temps-entities/src/external_services.rs +++ b/crates/temps-entities/src/external_services.rs
@@ -20,6 +20,11 @@ pub struct Model { pub config: Option, /// Node this service runs on. NULL = local node (single-node mode). pub node_id: Option, + /// Service topology: 'standalone' (single container) or 'cluster' (multiple members). + #[sea_orm(default_value = "standalone")] + pub topology: String, + /// Error message from failed initialization (null if no error).
+ pub error_message: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] @@ -28,6 +33,8 @@ pub enum Relation { Backups, #[sea_orm(has_many = "super::project_services::Entity")] ProjectServices, + #[sea_orm(has_many = "super::service_members::Entity")] + Members, #[sea_orm( belongs_to = "super::nodes::Entity", from = "Column::NodeId", @@ -48,6 +55,12 @@ impl Related for Entity { } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Members.def() + } +} + impl Related for Entity { fn to() -> RelationDef { Relation::Node.def() diff --git a/crates/temps-entities/src/lib.rs b/crates/temps-entities/src/lib.rs index 4036a569..7de5dbec 100644 --- a/crates/temps-entities/src/lib.rs +++ b/crates/temps-entities/src/lib.rs @@ -51,6 +51,7 @@ pub mod repositories; pub mod request_sessions; pub mod roles; pub mod s3_sources; +pub mod service_members; pub mod sessions; pub mod source_type; pub mod static_bundles; diff --git a/crates/temps-entities/src/service_members.rs b/crates/temps-entities/src/service_members.rs new file mode 100644 index 00000000..ed7fbb0a --- /dev/null +++ b/crates/temps-entities/src/service_members.rs @@ -0,0 +1,80 @@ +use async_trait::async_trait; +use sea_orm::entity::prelude::*; +use sea_orm::{ActiveValue::Set, ConnectionTrait, DbErr}; +use serde::{Deserialize, Serialize}; +use temps_core::DBDateTime; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)] +#[sea_orm(table_name = "service_members")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub service_id: i32, + /// Node this member runs on. NULL = local node (control plane). + pub node_id: Option, + /// Service-type-specific role: 'primary', 'replica', 'monitor', 'arbiter', 'sentinel', 'node' + pub role: String, + pub container_id: Option, + pub container_name: String, + /// WireGuard IP or DNS name for inter-member communication + pub hostname: Option, + pub port: Option, + pub status: String, + /// Stable member identity (member-0, member-1, etc.) 
+ pub ordinal: i32, + /// Encrypted member-specific config overrides + pub config: Option, + pub created_at: DBDateTime, + pub updated_at: DBDateTime, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::external_services::Entity", + from = "Column::ServiceId", + to = "super::external_services::Column::Id" + )] + Service, + #[sea_orm( + belongs_to = "super::nodes::Entity", + from = "Column::NodeId", + to = "super::nodes::Column::Id" + )] + Node, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Service.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Node.def() + } +} + +#[async_trait] +impl ActiveModelBehavior for ActiveModel { + async fn before_save(mut self, _db: &C, insert: bool) -> Result + where + C: ConnectionTrait, + { + let now = chrono::Utc::now(); + + if insert { + if self.created_at.is_not_set() { + self.created_at = Set(now); + } + if self.updated_at.is_not_set() { + self.updated_at = Set(now); + } + } else { + self.updated_at = Set(now); + } + + Ok(self) + } +} diff --git a/crates/temps-environments/Cargo.toml b/crates/temps-environments/Cargo.toml index 619cd993..436c7154 100644 --- a/crates/temps-environments/Cargo.toml +++ b/crates/temps-environments/Cargo.toml @@ -30,3 +30,4 @@ axum-macros = { workspace = true } http = { workspace = true } futures-util = { workspace = true } slug = { workspace = true } +argon2 = { workspace = true } diff --git a/crates/temps-environments/src/handlers/audit.rs b/crates/temps-environments/src/handlers/audit.rs index d53faa48..a1be6e2f 100644 --- a/crates/temps-environments/src/handlers/audit.rs +++ b/crates/temps-environments/src/handlers/audit.rs @@ -49,6 +49,40 @@ impl AuditOperation for EnvironmentSettingsUpdatedAudit { } } +#[derive(Debug, Clone, Serialize)] +pub struct EnvironmentSleepStateChangedAudit { + pub context: AuditContext, + pub project_id: i32, + pub environment_id: i32, + pub environment_name: String, + pub environment_slug: String, + pub previous_state: &'static str, + pub new_state: &'static str, +} + +impl AuditOperation for EnvironmentSleepStateChangedAudit { + fn operation_type(&self) -> String { + "ENVIRONMENT_SLEEP_STATE_CHANGED".to_string() + } + + fn user_id(&self) -> i32 { + self.context.user_id + } + + fn ip_address(&self) -> Option { + self.context.ip_address.clone() + } + + fn user_agent(&self) -> &str { + &self.context.user_agent + } + + fn serialize(&self) -> Result { + serde_json::to_string(self) + .map_err(|e| anyhow::anyhow!("Failed to serialize audit operation {}", e)) + } +} + #[derive(Debug, Clone, Serialize)] pub struct EnvironmentDeletedAudit { pub context: AuditContext, diff --git a/crates/temps-environments/src/handlers/handler.rs b/crates/temps-environments/src/handlers/handler.rs index 7174a1e0..a3e617e9 100644 --- a/crates/temps-environments/src/handlers/handler.rs +++ b/crates/temps-environments/src/handlers/handler.rs @@ -1,5 +1,6 @@ use super::audit::{ EnvironmentDeletedAudit, EnvironmentSettingsUpdatedAudit, EnvironmentSettingsUpdatedFields, + EnvironmentSleepStateChangedAudit, }; use super::types::AppState; use axum::Router; @@ -634,8 +635,9 @@ pub async fn update_environment_settings( /// Wake a sleeping on-demand environment /// /// Manually wake an environment that has been put to sleep by the on-demand -/// idle timeout. Sets `sleeping = false` on the environment. The proxy will -/// detect the state change and start containers on the next request. +/// idle timeout. 
Starts containers, waits for health checks, then sets + /// `sleeping = false`. If no OnDemandWaker is available (proxy not running + /// in the same process), the request fails and the environment is instead woken + /// automatically by the proxy on the next incoming request. #[utoipa::path( post, path = "/projects/{project_id}/environments/{env_id}/wake",
@@ -644,6 +646,7 @@ pub async fn update_environment_settings( (status = 200, description = "Environment woken up", body = EnvironmentResponse), (status = 400, description = "On-demand not enabled for this environment"), (status = 404, description = "Environment not found"), + (status = 429, description = "Too many state transitions, retry after cooldown"), (status = 500, description = "Internal server error") ), params(
@@ -659,9 +662,61 @@ pub async fn wake_environment( ) -> Result { permission_guard!(auth, EnvironmentsWrite); + // Cooldown: reject if last state change was less than 30 seconds ago + let environment = state + .environment_service + .get_environment(project_id, env_id) + .await?; + + let seconds_since_update = (chrono::Utc::now() - environment.updated_at).num_seconds(); + if seconds_since_update < 30 { + return Err(temps_core::error_builder::too_many_requests() + .title("State Transition Cooldown") + .detail(format!( + "Environment {} was updated {}s ago. Please wait at least 30s between state transitions.", + env_id, seconds_since_update + )) + .build()); + } + + // Use the full container lifecycle wake if available + if let Some(ref waker) = state.on_demand_waker { + let wake_timeout = environment + .deployment_config + .as_ref() + .map(|c| c.wake_timeout_seconds) + .unwrap_or(30); + + waker + .wake_environment(env_id, wake_timeout) + .await + .map_err(|e| { + error!( + environment_id = env_id, + error = %e, + "Failed to wake environment via OnDemandWaker" + ); + temps_core::error_builder::internal_server_error() + .title("Wake Failed") + .detail(format!("Failed to wake environment {}: {}", env_id, e)) + .build() + })?; + } else { + // No OnDemandWaker available — cannot safely wake without starting containers + return Err(temps_core::error_builder::internal_server_error() + .title("Wake Unavailable") + .detail(format!( + "Cannot wake environment {}: on-demand container lifecycle manager is not available. \ + The environment will be woken automatically when the next request arrives via the proxy.", + env_id + )) + .build()); + } + + // Re-read the environment after wake let updated_environment = state .environment_service - .set_sleeping(project_id, env_id, false) + .get_environment(project_id, env_id) .await?; info!(
@@ -679,23 +734,14 @@ pub async fn wake_environment( let _ = state .audit_service - .create_audit_log(&EnvironmentSettingsUpdatedAudit { + .create_audit_log(&EnvironmentSleepStateChangedAudit { context: audit_context, project_id, - project_name: String::new(), - project_slug: String::new(), environment_id: env_id, environment_name: updated_environment.name.clone(), environment_slug: updated_environment.slug.clone(), - updated_settings: EnvironmentSettingsUpdatedFields { - cpu_request: None, - cpu_limit: None, - memory_request: None, - memory_limit: None, - branch: None, - replicas: None, - security_updated: false, - }, + previous_state: "sleeping", + new_state: "awake", }) .await;
@@ -724,8 +770,8 @@ pub async fn wake_environment( /// Sleep an on-demand environment /// -/// Manually put an on-demand environment to sleep. Sets `sleeping = true`. -/// The proxy will stop sending traffic and the idle sweep will stop containers. +/// Manually put an on-demand environment to sleep.
Stops containers and sets +/// `sleeping = true`. If no OnDemandWaker is available, falls back to DB flag only. #[utoipa::path( post, path = "/projects/{project_id}/environments/{env_id}/sleep", @@ -734,6 +780,7 @@ pub async fn wake_environment( (status = 200, description = "Environment put to sleep", body = EnvironmentResponse), (status = 400, description = "On-demand not enabled for this environment"), (status = 404, description = "Environment not found"), + (status = 429, description = "Too many state transitions, retry after cooldown"), (status = 500, description = "Internal server error") ), params( @@ -749,9 +796,48 @@ pub async fn sleep_environment( ) -> Result { permission_guard!(auth, EnvironmentsWrite); + // Cooldown: reject if last state change was less than 30 seconds ago + let environment = state + .environment_service + .get_environment(project_id, env_id) + .await?; + + let seconds_since_update = (chrono::Utc::now() - environment.updated_at).num_seconds(); + if seconds_since_update < 30 { + return Err(temps_core::error_builder::too_many_requests() + .title("State Transition Cooldown") + .detail(format!( + "Environment {} was updated {}s ago. Please wait at least 30s between state transitions.", + env_id, seconds_since_update + )) + .build()); + } + + // Use the full container lifecycle sleep if available + if let Some(ref waker) = state.on_demand_waker { + waker.sleep_environment(env_id).await.map_err(|e| { + error!( + environment_id = env_id, + error = %e, + "Failed to sleep environment via OnDemandWaker" + ); + temps_core::error_builder::internal_server_error() + .title("Sleep Failed") + .detail(format!("Failed to sleep environment {}: {}", env_id, e)) + .build() + })?; + } else { + // Fallback: set DB flag only + state + .environment_service + .set_sleeping(project_id, env_id, true) + .await?; + } + + // Re-read the environment after sleep let updated_environment = state .environment_service - .set_sleeping(project_id, env_id, true) + .get_environment(project_id, env_id) .await?; info!( @@ -769,23 +855,14 @@ pub async fn sleep_environment( let _ = state .audit_service - .create_audit_log(&EnvironmentSettingsUpdatedAudit { + .create_audit_log(&EnvironmentSleepStateChangedAudit { context: audit_context, project_id, - project_name: String::new(), - project_slug: String::new(), environment_id: env_id, environment_name: updated_environment.name.clone(), environment_slug: updated_environment.slug.clone(), - updated_settings: EnvironmentSettingsUpdatedFields { - cpu_request: None, - cpu_limit: None, - memory_request: None, - memory_limit: None, - branch: None, - replicas: None, - security_updated: false, - }, + previous_state: "awake", + new_state: "sleeping", }) .await; diff --git a/crates/temps-environments/src/handlers/types.rs b/crates/temps-environments/src/handlers/types.rs index daa0bcdc..78136c6d 100644 --- a/crates/temps-environments/src/handlers/types.rs +++ b/crates/temps-environments/src/handlers/types.rs @@ -12,6 +12,9 @@ pub struct AppState { pub env_var_service: Arc, pub audit_service: Arc, pub deployment_service: Arc, + /// Optional on-demand waker for starting/stopping containers during wake/sleep. + /// Only available when the proxy's OnDemandManager is registered. 
+ pub on_demand_waker: Option>, } pub fn create_environment_app_state( @@ -19,12 +22,14 @@ pub fn create_environment_app_state( env_var_service: Arc, audit_service: Arc, deployment_service: Arc, + on_demand_waker: Option>, ) -> Arc { Arc::new(AppState { environment_service, env_var_service, audit_service, deployment_service, + on_demand_waker, }) } @@ -191,6 +196,12 @@ pub struct UpdateEnvironmentSettingsRequest { /// Max seconds to wait for containers to start on wake (5-120). Default: 30. #[serde(skip_serializing_if = "Option::is_none")] pub wake_timeout_seconds: Option, + /// Set a password to protect this environment. The proxy will show an HTML + /// password form before allowing access. The password is argon2-hashed + /// server-side and never stored in plaintext. + /// Send an empty string to remove password protection. + #[serde(skip_serializing_if = "Option::is_none")] + pub password: Option, } #[derive(Serialize, Deserialize, ToSchema)] diff --git a/crates/temps-environments/src/plugin.rs b/crates/temps-environments/src/plugin.rs index 7ee0a784..c8a59e35 100644 --- a/crates/temps-environments/src/plugin.rs +++ b/crates/temps-environments/src/plugin.rs @@ -60,12 +60,14 @@ impl TempsPlugin for EnvironmentsPlugin { let audit_service = context.require_service::(); let env_var_service = context.require_service::(); let deployment_service = context.require_service::(); + let on_demand_waker = context.get_service::(); let app_state = crate::handlers::create_environment_app_state( environment_service, env_var_service, audit_service, deployment_service, + on_demand_waker, ); let routes = crate::handlers::configure_routes().with_state(app_state); diff --git a/crates/temps-environments/src/services/environment_service.rs b/crates/temps-environments/src/services/environment_service.rs index 5621a69b..e785e95c 100644 --- a/crates/temps-environments/src/services/environment_service.rs +++ b/crates/temps-environments/src/services/environment_service.rs @@ -1,6 +1,6 @@ use sea_orm::{ - ActiveModelTrait, ColumnTrait, DbErr, EntityTrait, QueryFilter, QueryOrder, Set, - TransactionTrait, + ActiveModelTrait, ColumnTrait, ConnectionTrait, DbErr, EntityTrait, QueryFilter, QueryOrder, + Set, Statement, TransactionTrait, }; use serde::Serialize; use slug::slugify; @@ -58,23 +58,29 @@ impl From for Problem { EnvironmentError::InvalidInput(msg) => { temps_core::error_builder::bad_request().detail(msg).build() } - EnvironmentError::DatabaseConnectionError(msg) => { + EnvironmentError::DatabaseConnectionError(_) => { + // Log full details server-side, return generic message to client + warn!("Database connection error: {}", error); temps_core::error_builder::internal_server_error() - .detail(msg) + .detail("A database error occurred while processing the request") .build() } - EnvironmentError::DatabaseError { reason } => { + EnvironmentError::DatabaseError { .. } => { + warn!("Database error: {}", error); temps_core::error_builder::internal_server_error() - .detail(reason) + .detail("A database error occurred while processing the request") .build() } EnvironmentError::BranchAlreadyInUse { ..
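// Illustrative sketch (the values are placeholders, not part of this patch): the JSON
// payloads a client would send to the environment settings endpoint to enable or remove
// password protection, matching the `password` field documented above.
fn password_protection_payloads() -> (serde_json::Value, serde_json::Value) {
    // Setting a password: the server hashes it with argon2 before persisting.
    let enable = serde_json::json!({ "password": "correct-horse-battery-staple" });
    // An empty string removes password protection entirely.
    let disable = serde_json::json!({ "password": "" });
    (enable, disable)
}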
} => temps_core::error_builder::bad_request() .title("Branch Already In Use") .detail(error.to_string()) .build(), - EnvironmentError::Other(msg) => temps_core::error_builder::internal_server_error() - .detail(msg) - .build(), + EnvironmentError::Other(_) => { + warn!("Environment error: {}", error); + temps_core::error_builder::internal_server_error() + .detail("An internal error occurred while processing the request") + .build() + } } } } @@ -111,7 +117,16 @@ impl EnvironmentService { } pub async fn compute_environment_url(&self, environment_slug: &str) -> String { - let settings = self.config_service.get_settings().await.unwrap_or_default(); + let settings = match self.config_service.get_settings().await { + Ok(s) => s, + Err(e) => { + warn!( + "Failed to load settings for URL computation, using defaults: {}", + e + ); + Default::default() + } + }; // Use external_url if configured, otherwise fall back to preview_domain let base_domain = settings.preview_domain.clone(); @@ -550,7 +565,43 @@ impl EnvironmentService { if let Some(session_recording_enabled) = settings.session_recording_enabled { deployment_config.session_recording_enabled = session_recording_enabled; } - if let Some(security) = settings.security { + if let Some(mut security) = settings.security { + // Preserve existing password_protection — it's managed separately via the `password` field + if security.password_protection.is_none() { + security.password_protection = deployment_config + .security + .as_ref() + .and_then(|s| s.password_protection.clone()); + } + deployment_config.security = Some(security); + } + // Handle password protection: hash plaintext password with argon2 + if let Some(ref password) = settings.password { + let mut security = deployment_config.security.clone().unwrap_or_default(); + if password.is_empty() { + // Empty string removes password protection + security.password_protection = None; + } else { + use argon2::password_hash::{rand_core::OsRng, SaltString}; + use argon2::{Argon2, PasswordHasher}; + let salt = SaltString::generate(&mut OsRng); + let argon2 = Argon2::default(); + let hash = argon2 + .hash_password(password.as_bytes(), &salt) + .map_err(|e| { + EnvironmentError::InvalidInput(format!( + "Failed to hash password for environment {}: {}", + env_id, e + )) + })? + .to_string(); + security.password_protection = Some( + temps_entities::deployment_config::PasswordProtectionConfig { + enabled: true, + password_hash: hash, + }, + ); + } deployment_config.security = Some(security); } if settings.target_nodes.is_some() { @@ -595,6 +646,8 @@ impl EnvironmentService { } /// Set the sleeping state of an environment (for on-demand scale-to-zero). + /// Uses atomic CAS (UPDATE WHERE) to prevent race conditions between + /// concurrent API calls and proxy-initiated state transitions. /// Returns the updated environment model. 
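// Illustrative sketch (the proxy-side verification code is not shown in this diff, so
// this is an assumption of how the `argon2` crate would be used; the function name is a
// placeholder): verifying a submitted password against the PHC-format hash stored by
// the update-settings path above.
fn verify_environment_password(stored_hash: &str, candidate: &str) -> bool {
    use argon2::password_hash::PasswordHash;
    use argon2::{Argon2, PasswordVerifier};
    match PasswordHash::new(stored_hash) {
        Ok(parsed) => Argon2::default()
            .verify_password(candidate.as_bytes(), &parsed)
            .is_ok(),
        Err(_) => false,
    }
}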
pub async fn set_sleeping( &self, @@ -602,9 +655,9 @@ impl EnvironmentService { env_id: i32, sleeping: bool, ) -> Result { + // First verify the environment exists, belongs to the project, and has on-demand enabled let environment = self.get_environment(project_id, env_id).await?; - // Verify on-demand is enabled for this environment let on_demand = environment .deployment_config .as_ref() @@ -623,16 +676,24 @@ impl EnvironmentService { return Ok(environment); } - let mut active_model: environments::ActiveModel = environment.into(); - active_model.sleeping = Set(sleeping); - active_model.updated_at = Set(chrono::Utc::now()); - - let updated = active_model - .update(self.db.as_ref()) + // Atomic CAS: only succeeds if state hasn't changed since we read it + let result = self + .db + .execute(Statement::from_sql_and_values( + sea_orm::DatabaseBackend::Postgres, + "UPDATE environments SET sleeping = $1, updated_at = NOW() WHERE id = $2 AND project_id = $3 AND sleeping = $4", + [sleeping.into(), env_id.into(), project_id.into(), (!sleeping).into()], + )) .await .map_err(|e| EnvironmentError::DatabaseConnectionError(e.to_string()))?; - Ok(updated) + if result.rows_affected() == 0 { + // Another caller already changed the state — re-read and return current + return self.get_environment(project_id, env_id).await; + } + + // Re-read the updated environment + self.get_environment(project_id, env_id).await } pub async fn get_environment_domains( @@ -957,6 +1018,7 @@ mod tests { on_demand: None, idle_timeout_seconds: None, wake_timeout_seconds: None, + password: None, }, ) .await; diff --git a/crates/temps-kv/src/handlers/handler.rs b/crates/temps-kv/src/handlers/handler.rs index f62f030a..baa2dce0 100644 --- a/crates/temps-kv/src/handlers/handler.rs +++ b/crates/temps-kv/src/handlers/handler.rs @@ -524,6 +524,9 @@ pub async fn kv_enable( service_type: ServiceType::Redis, version, parameters, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; // Create the service through ExternalServiceManager diff --git a/crates/temps-migrations/src/migration/m20260313_000001_add_service_members.rs b/crates/temps-migrations/src/migration/m20260313_000001_add_service_members.rs new file mode 100644 index 00000000..ceda74b5 --- /dev/null +++ b/crates/temps-migrations/src/migration/m20260313_000001_add_service_members.rs @@ -0,0 +1,191 @@ +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + // Add topology column to external_services (default 'standalone' for backward compat) + manager + .alter_table( + Table::alter() + .table(ExternalServices::Table) + .add_column( + ColumnDef::new(ExternalServices::Topology) + .string_len(20) + .not_null() + .default("standalone"), + ) + .to_owned(), + ) + .await?; + + // Create service_members table + manager + .create_table( + Table::create() + .table(ServiceMembers::Table) + .if_not_exists() + .col( + ColumnDef::new(ServiceMembers::Id) + .integer() + .not_null() + .auto_increment() + .primary_key(), + ) + .col( + ColumnDef::new(ServiceMembers::ServiceId) + .integer() + .not_null(), + ) + .col(ColumnDef::new(ServiceMembers::NodeId).integer().null()) + .col( + ColumnDef::new(ServiceMembers::Role) + .string_len(30) + .not_null(), + ) + .col( + ColumnDef::new(ServiceMembers::ContainerId) + .string_len(255) + .null(), + ) + .col( + ColumnDef::new(ServiceMembers::ContainerName) + 
.string_len(255) + .not_null(), + ) + .col( + ColumnDef::new(ServiceMembers::Hostname) + .string_len(255) + .null(), + ) + .col(ColumnDef::new(ServiceMembers::Port).integer().null()) + .col( + ColumnDef::new(ServiceMembers::Status) + .string_len(30) + .not_null() + .default("provisioning"), + ) + .col( + ColumnDef::new(ServiceMembers::Ordinal) + .integer() + .not_null() + .default(0), + ) + .col(ColumnDef::new(ServiceMembers::Config).text().null()) + .col( + ColumnDef::new(ServiceMembers::CreatedAt) + .timestamp_with_time_zone() + .not_null() + .default(Expr::current_timestamp()), + ) + .col( + ColumnDef::new(ServiceMembers::UpdatedAt) + .timestamp_with_time_zone() + .not_null() + .default(Expr::current_timestamp()), + ) + .foreign_key( + ForeignKey::create() + .name("fk_service_members_service") + .from(ServiceMembers::Table, ServiceMembers::ServiceId) + .to(ExternalServices::Table, ExternalServices::Id) + .on_delete(ForeignKeyAction::Cascade), + ) + .foreign_key( + ForeignKey::create() + .name("fk_service_members_node") + .from(ServiceMembers::Table, ServiceMembers::NodeId) + .to(Nodes::Table, Nodes::Id) + .on_delete(ForeignKeyAction::SetNull), + ) + .to_owned(), + ) + .await?; + + // Indexes + manager + .create_index( + Index::create() + .name("idx_service_members_service_id") + .table(ServiceMembers::Table) + .col(ServiceMembers::ServiceId) + .to_owned(), + ) + .await?; + + manager + .create_index( + Index::create() + .name("idx_service_members_node_id") + .table(ServiceMembers::Table) + .col(ServiceMembers::NodeId) + .to_owned(), + ) + .await?; + + // Unique constraint: one ordinal per service + manager + .create_index( + Index::create() + .name("uq_service_members_service_ordinal") + .table(ServiceMembers::Table) + .col(ServiceMembers::ServiceId) + .col(ServiceMembers::Ordinal) + .unique() + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_table(Table::drop().table(ServiceMembers::Table).to_owned()) + .await?; + + manager + .alter_table( + Table::alter() + .table(ExternalServices::Table) + .drop_column(ExternalServices::Topology) + .to_owned(), + ) + .await?; + + Ok(()) + } +} + +#[derive(DeriveIden)] +enum ExternalServices { + Table, + Id, + Topology, +} + +#[derive(DeriveIden)] +enum ServiceMembers { + Table, + Id, + ServiceId, + NodeId, + Role, + ContainerId, + ContainerName, + Hostname, + Port, + Status, + Ordinal, + Config, + CreatedAt, + UpdatedAt, +} + +#[derive(DeriveIden)] +enum Nodes { + Table, + Id, +} diff --git a/crates/temps-migrations/src/migration/m20260313_000002_add_service_error_message.rs b/crates/temps-migrations/src/migration/m20260313_000002_add_service_error_message.rs new file mode 100644 index 00000000..15cfa8b9 --- /dev/null +++ b/crates/temps-migrations/src/migration/m20260313_000002_add_service_error_message.rs @@ -0,0 +1,39 @@ +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(ExternalServices::Table) + .add_column(ColumnDef::new(ExternalServices::ErrorMessage).text().null()) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(ExternalServices::Table) + .drop_column(ExternalServices::ErrorMessage) + .to_owned(), + ) + .await?; + 
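// Illustrative sketch (the real entity in `temps-entities` is not shown in this diff;
// the field types below are assumptions derived from the migration columns above): a
// Sea-ORM entity matching the new `service_members` table.
mod service_members_entity_sketch {
    use sea_orm::entity::prelude::*;

    #[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
    #[sea_orm(table_name = "service_members")]
    pub struct Model {
        #[sea_orm(primary_key)]
        pub id: i32,
        pub service_id: i32,
        pub node_id: Option<i32>,
        pub role: String,
        pub container_id: Option<String>,
        pub container_name: String,
        pub hostname: Option<String>,
        pub port: Option<i32>,
        pub status: String,
        pub ordinal: i32,
        pub config: Option<String>,
        pub created_at: DateTimeWithTimeZone,
        pub updated_at: DateTimeWithTimeZone,
    }

    #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
    pub enum Relation {}

    impl ActiveModelBehavior for ActiveModel {}
}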
+ Ok(()) + } +} + +#[derive(DeriveIden)] +enum ExternalServices { + Table, + ErrorMessage, +} diff --git a/crates/temps-migrations/src/migration/mod.rs b/crates/temps-migrations/src/migration/mod.rs index 0a2d4479..2bfecee3 100644 --- a/crates/temps-migrations/src/migration/mod.rs +++ b/crates/temps-migrations/src/migration/mod.rs @@ -42,6 +42,8 @@ mod m20260310_000004_add_is_byok_to_ai_usage_logs; mod m20260310_000005_add_agent_tracking_to_ai_usage_logs; mod m20260310_000006_add_environment_protection; mod m20260311_000001_add_on_demand_environments; +mod m20260313_000001_add_service_members; +mod m20260313_000002_add_service_error_message; pub struct Migrator; @@ -91,6 +93,8 @@ impl MigratorTrait for Migrator { Box::new(m20260310_000005_add_agent_tracking_to_ai_usage_logs::Migration), Box::new(m20260310_000006_add_environment_protection::Migration), Box::new(m20260311_000001_add_on_demand_environments::Migration), + Box::new(m20260313_000001_add_service_members::Migration), + Box::new(m20260313_000002_add_service_error_message::Migration), ] } } diff --git a/crates/temps-providers/Cargo.toml b/crates/temps-providers/Cargo.toml index e5c3e161..d49f187d 100644 --- a/crates/temps-providers/Cargo.toml +++ b/crates/temps-providers/Cargo.toml @@ -55,3 +55,4 @@ docker-tests = [] tokio-test = "0.4" testcontainers = { workspace = true } hyper = { version = "1", features = ["client", "http1", "http2"] } +tokio-postgres = "0.7" diff --git a/crates/temps-providers/src/externalsvc/cluster_integration_tests.rs b/crates/temps-providers/src/externalsvc/cluster_integration_tests.rs new file mode 100644 index 00000000..6e97f3ef --- /dev/null +++ b/crates/temps-providers/src/externalsvc/cluster_integration_tests.rs @@ -0,0 +1,1219 @@ +//! Integration tests for service clusters. +//! +//! These tests require Docker and the `gotempsh/postgres-ha:18-bookworm` image. +//! They test the full lifecycle: cluster creation, data replication, failover, and recovery. +//! +//! Run unit tests: `cargo test --lib -p temps-providers -- cluster_integration --nocapture` +//! 
Run Docker tests: `cargo test --lib -p temps-providers --features docker-tests -- cluster_integration --nocapture` + +#[cfg(test)] +mod tests { + use bollard::Docker; + use std::sync::Arc; + + use crate::externalsvc::postgres_cluster::PostgresClusterService; + use crate::externalsvc::{ + ClusterMemberInfo, ClusterMemberSpec, ExternalService, ServiceConfig, ServiceType, + }; + + // ----------------------------------------------------------------------- + // Trait-level tests (no Docker required) + // ----------------------------------------------------------------------- + + #[test] + fn test_postgres_cluster_supports_cluster() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + assert!(service.supports_cluster()); + } + + #[test] + fn test_postgres_cluster_roles() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + let roles = service.valid_cluster_roles(); + assert!(roles.contains(&"monitor")); + assert!(roles.contains(&"primary")); + assert!(roles.contains(&"replica")); + } + + #[tokio::test] + async fn test_init_cluster_requires_monitor() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({}), + }; + + let result = service + .init_cluster( + config, + vec![ClusterMemberSpec { + role: "primary".to_string(), + node_id: None, + ordinal: 0, + hostname: None, + }], + ) + .await; + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("monitor")); + } + + #[tokio::test] + async fn test_init_cluster_returns_correct_members() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("mydb".to_string(), Arc::new(docker)); + + let config = ServiceConfig { + name: "mydb".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({ + "database": "myapp", + "username": "admin", + "password": "secret" + }), + }; + + let members = vec![ + ClusterMemberSpec { + role: "monitor".to_string(), + node_id: None, + ordinal: 0, + hostname: Some("10.0.0.1".to_string()), + }, + ClusterMemberSpec { + role: "primary".to_string(), + node_id: Some(1), + ordinal: 1, + hostname: Some("10.0.0.2".to_string()), + }, + ClusterMemberSpec { + role: "replica".to_string(), + node_id: Some(2), + ordinal: 2, + hostname: Some("10.0.0.3".to_string()), + }, + ]; + + let results = service.init_cluster(config, members).await.unwrap(); + + assert_eq!(results.len(), 3); + assert_eq!(results[0].role, "monitor"); + assert_eq!(results[0].container_name, "postgres-mydb-monitor"); + assert_eq!(results[0].ordinal, 0); + assert_eq!(results[1].role, "primary"); + assert_eq!(results[1].container_name, "postgres-mydb-1"); + assert_eq!(results[1].ordinal, 1); + assert_eq!(results[2].role, "replica"); + assert_eq!(results[2].container_name, "postgres-mydb-2"); + assert_eq!(results[2].ordinal, 2); + } + + #[test] + fn test_cluster_connection_string_excludes_monitor() { + let docker 
= Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + + let members = vec![ + ClusterMemberInfo { + role: "monitor".to_string(), + hostname: "10.0.0.1".to_string(), + port: 5432, + status: "running".to_string(), + }, + ClusterMemberInfo { + role: "primary".to_string(), + hostname: "10.0.0.2".to_string(), + port: 5432, + status: "running".to_string(), + }, + ClusterMemberInfo { + role: "replica".to_string(), + hostname: "10.0.0.3".to_string(), + port: 5433, + status: "running".to_string(), + }, + ]; + + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({ + "database": "mydb", + "username": "user", + "password": "pass" + }), + }; + + let conn = service + .cluster_connection_string(&members, &config) + .unwrap(); + + assert!( + !conn.contains("10.0.0.1"), + "Monitor should not be in connection string" + ); + assert!(conn.contains("10.0.0.2:5432")); + assert!(conn.contains("10.0.0.3:5433")); + assert!(conn.contains(",")); + assert!(conn.contains("target_session_attrs=read-write")); + } + + #[test] + fn test_cluster_connection_string_no_running_nodes() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + + let members = vec![ClusterMemberInfo { + role: "primary".to_string(), + hostname: "10.0.0.2".to_string(), + port: 5432, + status: "stopped".to_string(), + }]; + + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({"database": "db", "username": "u", "password": "p"}), + }; + + let result = service.cluster_connection_string(&members, &config); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("No running data nodes")); + } + + #[test] + fn test_build_member_params_generates_correct_monitor_env() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("ha-test".to_string(), Arc::new(docker)); + + let config = crate::externalsvc::postgres_cluster::PostgresClusterConfig { + database: "mydb".to_string(), + username: "admin".to_string(), + password: Some("secret".to_string()), + max_connections: 100, + replicas: 1, + docker_image: None, + ssl_mode: "prefer".to_string(), + }; + + let monitor_spec = ClusterMemberSpec { + role: "monitor".to_string(), + node_id: None, + ordinal: 0, + hostname: Some("10.0.0.1".to_string()), + }; + + let params = service.build_member_params(&monitor_spec, &config, "10.0.0.1", 6100, 6100); + + assert!( + !params.environment.contains_key("MONITOR_URI"), + "Monitor should not have MONITOR_URI" + ); + assert_eq!(params.container_name, "postgres-ha-test-monitor"); + } + + #[test] + fn test_build_member_params_data_node_has_monitor_uri() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("ha-test".to_string(), Arc::new(docker)); + + let config = crate::externalsvc::postgres_cluster::PostgresClusterConfig { + database: "mydb".to_string(), + username: "admin".to_string(), + password: Some("secret".to_string()), + max_connections: 100, + replicas: 1, + 
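// Illustrative sketch (the URL below is a placeholder): consuming the multi-host
// connection string that `cluster_connection_string` produces, using the
// `tokio-postgres` crate added above as a dev dependency. With
// `target_session_attrs=read-write`, the driver connects to whichever data node is
// currently the writable primary, which is what keeps the monitor-less connection
// string usable across failovers.
async fn connect_to_cluster_primary() -> Result<tokio_postgres::Client, tokio_postgres::Error> {
    let url =
        "postgresql://user:pass@10.0.0.2:5432,10.0.0.3:5433/mydb?target_session_attrs=read-write";
    let (client, connection) = tokio_postgres::connect(url, tokio_postgres::NoTls).await?;
    // The connection future drives the socket; spawn it so the client can issue queries.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("postgres connection error: {e}");
        }
    });
    Ok(client)
}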
docker_image: None, + ssl_mode: "prefer".to_string(), + }; + + let primary_spec = ClusterMemberSpec { + role: "primary".to_string(), + node_id: Some(1), + ordinal: 1, + hostname: Some("10.0.0.2".to_string()), + }; + + let params = service.build_member_params(&primary_spec, &config, "10.0.0.1", 6100, 6101); + + assert!(params.environment.contains_key("MONITOR_URI")); + assert!(params + .environment + .get("MONITOR_URI") + .unwrap() + .contains("10.0.0.1:6100")); + assert_eq!(params.environment.get("NODE_HOSTNAME").unwrap(), "10.0.0.2"); + assert_eq!(params.container_name, "postgres-ha-test-1"); + } + + // ----------------------------------------------------------------------- + // Docker integration tests — full cluster lifecycle + // ----------------------------------------------------------------------- + + #[cfg(feature = "docker-tests")] + mod docker_tests { + use bollard::exec::CreateExecOptions; + use bollard::models::*; + use bollard::query_parameters::*; + use bollard::Docker; + use futures::StreamExt; + use std::sync::Arc; + use std::time::{Duration, Instant}; + + use crate::externalsvc::postgres_cluster::PostgresClusterService; + use crate::externalsvc::ExternalService; + + const TEST_IMAGE: &str = "gotempsh/postgres-ha:18-bookworm"; + const NETWORK_NAME: &str = "pg-cluster-integration-test"; + const TEST_DB: &str = "testdb"; + const TEST_USER: &str = "testuser"; + const TEST_PASSWORD: &str = "testpassword123"; + + async fn connect_docker() -> Option> { + let docker = match Docker::connect_with_local_defaults() { + Ok(d) => Arc::new(d), + Err(e) => { + println!("Docker not available, skipping: {}", e); + return None; + } + }; + if docker.ping().await.is_err() { + println!("Docker daemon not responding, skipping"); + return None; + } + Some(docker) + } + + async fn image_available(docker: &Docker) -> bool { + match docker.inspect_image(TEST_IMAGE).await { + Ok(_) => true, + Err(_) => { + println!( + "Image {} not found. 
Build it first:\n docker build -t {} ~/poc/postgres-poc/", + TEST_IMAGE, TEST_IMAGE + ); + false + } + } + } + + async fn create_network(docker: &Docker) -> anyhow::Result<()> { + let _ = docker.remove_network(NETWORK_NAME).await; + docker + .create_network(NetworkCreateRequest { + name: NETWORK_NAME.to_string(), + driver: Some("bridge".to_string()), + ..Default::default() + }) + .await + .map_err(|e| anyhow::anyhow!("Failed to create network: {}", e))?; + Ok(()) + } + + async fn create_container( + docker: &Docker, + name: &str, + env: Vec, + cmd: Vec, + ) -> anyhow::Result { + let _ = docker + .remove_container( + name, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await; + + let config = ContainerCreateBody { + image: Some(TEST_IMAGE.to_string()), + cmd: Some(cmd), + env: Some(env), + hostname: Some(name.to_string()), + user: Some("postgres".to_string()), + host_config: Some(HostConfig { + network_mode: Some(NETWORK_NAME.to_string()), + ..Default::default() + }), + ..Default::default() + }; + + let container = docker + .create_container( + Some(CreateContainerOptionsBuilder::new().name(name).build()), + config, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create container {}: {}", name, e))?; + + docker + .start_container(&container.id, None::) + .await + .map_err(|e| anyhow::anyhow!("Failed to start container {}: {}", name, e))?; + + println!(" Started container: {} ({})", name, &container.id[..12]); + Ok(container.id) + } + + async fn wait_for_postgres( + docker: &Docker, + container_id: &str, + container_name: &str, + timeout_secs: u64, + ) -> anyhow::Result<()> { + let start = Instant::now(); + let timeout = Duration::from_secs(timeout_secs); + + loop { + if start.elapsed() > timeout { + let mut log_stream = docker.logs( + container_id, + Some(LogsOptions { + tail: "30".to_string(), + stdout: true, + stderr: true, + ..Default::default() + }), + ); + let mut log_text = String::new(); + while let Some(Ok(chunk)) = log_stream.next().await { + log_text.push_str(&chunk.to_string()); + } + return Err(anyhow::anyhow!( + "Container {} did not become healthy within {}s. Last logs:\n{}", + container_name, + timeout_secs, + log_text + )); + } + + let exec = docker + .create_exec( + container_id, + CreateExecOptions { + cmd: Some(vec!["pg_isready", "-h", "localhost", "-p", "5432"]), + attach_stdout: Some(true), + attach_stderr: Some(true), + ..Default::default() + }, + ) + .await; + + if let Ok(exec) = exec { + let _ = docker.start_exec(&exec.id, None).await; + if let Ok(inspect) = docker.inspect_exec(&exec.id).await { + if inspect.exit_code == Some(0) { + println!(" {} is ready", container_name); + return Ok(()); + } + } + } + + tokio::time::sleep(Duration::from_secs(2)).await; + } + } + + async fn exec_sql( + docker: &Docker, + container_id: &str, + sql: &str, + db: &str, + user: &str, + ) -> anyhow::Result { + let exec = docker + .create_exec( + container_id, + CreateExecOptions { + cmd: Some(vec!["psql", "-U", user, "-d", db, "-t", "-A", "-c", sql]), + attach_stdout: Some(true), + attach_stderr: Some(true), + ..Default::default() + }, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create exec: {}", e))?; + + let output = docker + .start_exec(&exec.id, None) + .await + .map_err(|e| anyhow::anyhow!("Failed to start exec: {}", e))?; + + let mut result = String::new(); + if let bollard::exec::StartExecResults::Attached { mut output, .. 
} = output { + while let Some(Ok(chunk)) = output.next().await { + result.push_str(&chunk.to_string()); + } + } + + // Check exit code + let inspect = docker.inspect_exec(&exec.id).await?; + if let Some(code) = inspect.exit_code { + if code != 0 { + return Err(anyhow::anyhow!( + "SQL command failed (exit code {}): {}", + code, + result.trim() + )); + } + } + + Ok(result.trim().to_string()) + } + + async fn get_cluster_state(docker: &Docker, monitor_id: &str) -> anyhow::Result { + let exec = docker + .create_exec( + monitor_id, + CreateExecOptions { + cmd: Some(vec![ + "pg_autoctl", + "show", + "state", + "--pgdata", + "/var/lib/postgresql/monitor", + ]), + attach_stdout: Some(true), + attach_stderr: Some(true), + ..Default::default() + }, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create exec: {}", e))?; + + let output = docker + .start_exec(&exec.id, None) + .await + .map_err(|e| anyhow::anyhow!("Failed to start exec: {}", e))?; + + let mut result = String::new(); + if let bollard::exec::StartExecResults::Attached { mut output, .. } = output { + while let Some(Ok(chunk)) = output.next().await { + result.push_str(&chunk.to_string()); + } + } + Ok(result) + } + + async fn wait_for_replication( + docker: &Docker, + monitor_id: &str, + timeout_secs: u64, + ) -> anyhow::Result<()> { + let start = Instant::now(); + let timeout = Duration::from_secs(timeout_secs); + + loop { + if start.elapsed() > timeout { + let state = get_cluster_state(docker, monitor_id) + .await + .unwrap_or_default(); + return Err(anyhow::anyhow!( + "Replication not established within {}s. State:\n{}", + timeout_secs, + state + )); + } + + let state = get_cluster_state(docker, monitor_id) + .await + .unwrap_or_default(); + if state.contains("primary") && state.contains("secondary") { + println!(" Replication established:\n{}", state); + return Ok(()); + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } + } + + async fn wait_for_cluster_state( + docker: &Docker, + monitor_id: &str, + expected_pattern: &str, + timeout_secs: u64, + ) -> anyhow::Result { + let start = Instant::now(); + let timeout = Duration::from_secs(timeout_secs); + + loop { + if start.elapsed() > timeout { + let state = get_cluster_state(docker, monitor_id) + .await + .unwrap_or_default(); + return Err(anyhow::anyhow!( + "Expected state '{}' not reached within {}s. State:\n{}", + expected_pattern, + timeout_secs, + state + )); + } + + let state = get_cluster_state(docker, monitor_id) + .await + .unwrap_or_default(); + if state.contains(expected_pattern) { + return Ok(state); + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } + } + + async fn cleanup(docker: &Docker, container_ids: &[&str]) { + println!("\n Cleaning up..."); + for id in container_ids { + let _ = docker + .stop_container( + id, + Some(StopContainerOptions { + t: Some(5), + signal: None, + }), + ) + .await; + let _ = docker + .remove_container( + id, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await; + } + let _ = docker.remove_network(NETWORK_NAME).await; + println!(" Cleanup complete"); + } + + fn monitor_cmd(hostname: &str) -> Vec { + vec![ + "bash".to_string(), + "-c".to_string(), + format!( + concat!( + "PGDATA=/var/lib/postgresql/monitor\n", + "if [ ! 
-f \"$PGDATA/pg_autoctl.cfg\" ]; then\n", + " pg_autoctl create monitor \\\n", + " --pgdata \"$PGDATA\" \\\n", + " --pgport 5432 \\\n", + " --hostname {} \\\n", + " --auth trust \\\n", + " --ssl-self-signed;\n", + "fi\n", + "rm -f /tmp/pg_autoctl/*.pid /tmp/pg_autoctl/*/*.pid\n", + "exec pg_autoctl run --pgdata \"$PGDATA\"" + ), + hostname + ), + ] + } + + fn node_cmd() -> Vec { + vec![ + "bash".to_string(), + "-c".to_string(), + [ + "PGDATA=/var/lib/postgresql/pgdata", + "if [ ! -f \"$PGDATA/pg_autoctl.cfg\" ]; then", + " pg_autoctl create postgres \\", + " --pgdata \"$PGDATA\" \\", + " --pgport 5432 \\", + " --hostname \"$NODE_HOSTNAME\" \\", + " --name \"$NODE_NAME\" \\", + " --dbname testdb \\", + " --auth trust \\", + " --ssl-self-signed \\", + " --monitor \"$MONITOR_URI\";", + "fi", + "rm -f /tmp/pg_autoctl/*.pid /tmp/pg_autoctl/*/*.pid", + "exec pg_autoctl run --pgdata \"$PGDATA\"", + ] + .join("\n"), + ] + } + + // ================================================================= + // TEST: Full cluster lifecycle — replication + failover + recovery + // ================================================================= + + #[tokio::test] + async fn test_cluster_integration_docker_lifecycle() { + let docker = match connect_docker().await { + Some(d) => d, + None => return, + }; + + if !image_available(&docker).await { + return; + } + + println!("\n=== PostgreSQL HA Cluster Integration Test ===\n"); + + create_network(&docker) + .await + .expect("Failed to create network"); + + let monitor_name = "pg-itest-monitor"; + let node1_name = "pg-itest-node1"; + let node2_name = "pg-itest-node2"; + + // Start monitor + println!("1. Starting monitor..."); + let monitor_id = + create_container(&docker, monitor_name, vec![], monitor_cmd(monitor_name)) + .await + .expect("Failed to create monitor"); + + wait_for_postgres(&docker, &monitor_id, monitor_name, 60) + .await + .expect("Monitor did not become healthy"); + + // Start node1 (will become primary) + println!("\n2. Starting node1 (primary)..."); + let node1_id = create_container( + &docker, + node1_name, + vec![ + format!( + "MONITOR_URI=postgresql://autoctl_node@{}:5432/pg_auto_failover", + monitor_name + ), + format!("NODE_HOSTNAME={}", node1_name), + "NODE_NAME=node-1".to_string(), + format!("POSTGRES_USER={}", TEST_USER), + format!("POSTGRES_PASSWORD={}", TEST_PASSWORD), + format!("POSTGRES_DB={}", TEST_DB), + ], + node_cmd(), + ) + .await + .expect("Failed to create node1"); + + wait_for_postgres(&docker, &node1_id, node1_name, 90) + .await + .expect("Node1 did not become healthy"); + + // Start node2 (will become replica/secondary) + println!("\n3. Starting node2 (replica)..."); + let node2_id = create_container( + &docker, + node2_name, + vec![ + format!( + "MONITOR_URI=postgresql://autoctl_node@{}:5432/pg_auto_failover", + monitor_name + ), + format!("NODE_HOSTNAME={}", node2_name), + "NODE_NAME=node-2".to_string(), + format!("POSTGRES_USER={}", TEST_USER), + format!("POSTGRES_PASSWORD={}", TEST_PASSWORD), + format!("POSTGRES_DB={}", TEST_DB), + ], + node_cmd(), + ) + .await + .expect("Failed to create node2"); + + wait_for_postgres(&docker, &node2_id, node2_name, 90) + .await + .expect("Node2 did not become healthy"); + + // Wait for replication + println!("\n4. Waiting for replication..."); + wait_for_replication(&docker, &monitor_id, 120) + .await + .expect("Replication not established"); + + // Create table and insert 100k rows on primary + println!("\n5. 
Creating table and inserting 100k rows on primary..."); + + // pg_autoctl creates the db via --dbname, but create it explicitly in case + let _ = exec_sql( + &docker, + &node1_id, + &format!("CREATE DATABASE {}", TEST_DB), + "postgres", + "postgres", + ) + .await; + + tokio::time::sleep(Duration::from_secs(2)).await; + + exec_sql( + &docker, + &node1_id, + "CREATE TABLE IF NOT EXISTS test_data (id SERIAL PRIMARY KEY, value TEXT NOT NULL, created_at TIMESTAMPTZ DEFAULT NOW())", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to create table"); + + exec_sql( + &docker, + &node1_id, + "INSERT INTO test_data (value) SELECT 'row-' || generate_series(1, 100000)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to insert rows"); + + let primary_count = exec_sql( + &docker, + &node1_id, + "SELECT COUNT(*) FROM test_data", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count rows on primary"); + + println!(" Primary row count: {}", primary_count); + assert_eq!(primary_count, "100000", "Primary should have 100k rows"); + + // Verify replication + println!("\n6. Verifying replication to node2..."); + tokio::time::sleep(Duration::from_secs(5)).await; + + let replica_count = exec_sql( + &docker, + &node2_id, + "SELECT COUNT(*) FROM test_data", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count rows on replica"); + + println!(" Replica row count: {}", replica_count); + assert_eq!( + replica_count, "100000", + "Replica should have 100k rows via replication" + ); + + // Failover: Stop the primary + println!("\n7. Stopping primary (node1) to trigger failover..."); + docker + .stop_container( + &node1_id, + Some(StopContainerOptions { + t: Some(5), + signal: None, + }), + ) + .await + .expect("Failed to stop node1"); + + println!(" Waiting for failover..."); + let state = wait_for_cluster_state(&docker, &monitor_id, "wait_primary", 90) + .await + .expect("Failover did not complete"); + println!(" Post-failover state:\n{}", state); + + // Verify node2 is writable + println!("\n8. Verifying node2 is writable after failover..."); + exec_sql( + &docker, + &node2_id, + "INSERT INTO test_data (value) SELECT 'failover-row-' || generate_series(1, 1000)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Should be able to write to new primary after failover"); + + let new_count = exec_sql( + &docker, + &node2_id, + "SELECT COUNT(*) FROM test_data", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count after failover writes"); + + println!(" New primary total rows: {}", new_count); + assert_eq!( + new_count, "101000", + "New primary should have 101k rows (100k + 1k failover)" + ); + + // Recovery: Restart old primary + println!("\n9. Restarting old primary (node1) — should rejoin as secondary..."); + docker + .start_container(&node1_id, None::) + .await + .expect("Failed to restart node1"); + + println!(" Waiting for node1 to rejoin..."); + let state = wait_for_cluster_state(&docker, &monitor_id, "secondary", 120) + .await + .expect("Node1 did not rejoin as secondary"); + println!(" Post-recovery state:\n{}", state); + + // Verify data integrity on recovered node + println!("\n10. 
Verifying data integrity on recovered node..."); + wait_for_postgres(&docker, &node1_id, node1_name, 60) + .await + .expect("Recovered node1 not healthy"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + let recovered_count = exec_sql( + &docker, + &node1_id, + "SELECT COUNT(*) FROM test_data", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count rows on recovered node"); + + println!(" Recovered node row count: {}", recovered_count); + assert_eq!( + recovered_count, "101000", + "Recovered node should have all 101k rows" + ); + + // Verify PostgresClusterService integration + println!("\n11. Verifying PostgresClusterService integration..."); + let service = PostgresClusterService::new("itest".to_string(), docker.clone()); + assert!(service.supports_cluster()); + let roles = service.valid_cluster_roles(); + assert!(roles.contains(&"monitor")); + assert!(roles.contains(&"primary")); + assert!(roles.contains(&"replica")); + + println!("\n=== All cluster integration tests passed! ===\n"); + + cleanup(&docker, &[&monitor_id, &node1_id, &node2_id]).await; + } + + // ================================================================= + // TEST: Member recovery — kill and restart a replica + // ================================================================= + + async fn create_container_in_network( + docker: &Docker, + name: &str, + env: Vec, + cmd: Vec, + network: &str, + ) -> anyhow::Result { + let _ = docker + .remove_container( + name, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await; + + let config = ContainerCreateBody { + image: Some(TEST_IMAGE.to_string()), + cmd: Some(cmd), + env: Some(env), + hostname: Some(name.to_string()), + user: Some("postgres".to_string()), + host_config: Some(HostConfig { + network_mode: Some(network.to_string()), + ..Default::default() + }), + ..Default::default() + }; + + let container = docker + .create_container( + Some(CreateContainerOptionsBuilder::new().name(name).build()), + config, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create container {}: {}", name, e))?; + + docker + .start_container(&container.id, None::) + .await + .map_err(|e| anyhow::anyhow!("Failed to start container {}: {}", name, e))?; + + println!(" Started container: {} ({})", name, &container.id[..12]); + Ok(container.id) + } + + #[tokio::test] + async fn test_cluster_integration_docker_member_recovery() { + let docker = match connect_docker().await { + Some(d) => d, + None => return, + }; + + if !image_available(&docker).await { + return; + } + + println!("\n=== PostgreSQL HA Member Recovery Test ===\n"); + + let network = "pg-cluster-recovery-test"; + let _ = docker.remove_network(network).await; + docker + .create_network(NetworkCreateRequest { + name: network.to_string(), + driver: Some("bridge".to_string()), + ..Default::default() + }) + .await + .expect("Failed to create network"); + + let monitor_name = "pg-itest2-monitor"; + let node1_name = "pg-itest2-node1"; + let node2_name = "pg-itest2-node2"; + + println!("1. 
Starting cluster..."); + + let monitor_id = create_container_in_network( + &docker, + monitor_name, + vec![], + monitor_cmd(monitor_name), + network, + ) + .await + .expect("Failed to create monitor"); + wait_for_postgres(&docker, &monitor_id, monitor_name, 60) + .await + .expect("Monitor not healthy"); + + let node1_id = create_container_in_network( + &docker, + node1_name, + vec![ + format!( + "MONITOR_URI=postgresql://autoctl_node@{}:5432/pg_auto_failover", + monitor_name + ), + format!("NODE_HOSTNAME={}", node1_name), + "NODE_NAME=node-1".to_string(), + format!("POSTGRES_USER={}", TEST_USER), + format!("POSTGRES_PASSWORD={}", TEST_PASSWORD), + format!("POSTGRES_DB={}", TEST_DB), + ], + node_cmd(), + network, + ) + .await + .expect("Failed to create node1"); + wait_for_postgres(&docker, &node1_id, node1_name, 90) + .await + .expect("Node1 not healthy"); + + let node2_id = create_container_in_network( + &docker, + node2_name, + vec![ + format!( + "MONITOR_URI=postgresql://autoctl_node@{}:5432/pg_auto_failover", + monitor_name + ), + format!("NODE_HOSTNAME={}", node2_name), + "NODE_NAME=node-2".to_string(), + format!("POSTGRES_USER={}", TEST_USER), + format!("POSTGRES_PASSWORD={}", TEST_PASSWORD), + format!("POSTGRES_DB={}", TEST_DB), + ], + node_cmd(), + network, + ) + .await + .expect("Failed to create node2"); + wait_for_postgres(&docker, &node2_id, node2_name, 90) + .await + .expect("Node2 not healthy"); + + wait_for_replication(&docker, &monitor_id, 120) + .await + .expect("Replication not established"); + + // Insert initial data + println!("\n2. Inserting test data..."); + let _ = exec_sql( + &docker, + &node1_id, + &format!("CREATE DATABASE {}", TEST_DB), + "postgres", + "postgres", + ) + .await; + tokio::time::sleep(Duration::from_secs(2)).await; + + exec_sql( + &docker, + &node1_id, + "CREATE TABLE IF NOT EXISTS recovery_test (id SERIAL PRIMARY KEY, value TEXT)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to create table"); + + exec_sql( + &docker, + &node1_id, + "INSERT INTO recovery_test (value) SELECT 'initial-' || generate_series(1, 10000)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to insert initial data"); + + tokio::time::sleep(Duration::from_secs(3)).await; + + // Kill replica + println!("\n3. Killing replica (node2)..."); + docker + .stop_container( + &node2_id, + Some(StopContainerOptions { + t: Some(2), + signal: None, + }), + ) + .await + .expect("Failed to stop node2"); + + // Insert more data while replica is down + println!(" Inserting data while replica is down..."); + exec_sql( + &docker, + &node1_id, + "INSERT INTO recovery_test (value) SELECT 'during-outage-' || generate_series(1, 5000)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to insert during outage"); + + // Restart replica + println!("\n4. 
Restarting replica (node2)..."); + docker + .start_container(&node2_id, None::) + .await + .expect("Failed to restart node2"); + + wait_for_postgres(&docker, &node2_id, node2_name, 90) + .await + .expect("Restarted node2 not healthy"); + + println!(" Waiting for replica to catch up..."); + wait_for_replication(&docker, &monitor_id, 120) + .await + .expect("Replication not re-established after recovery"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + // Verify data on recovered replica + let recovered_count = exec_sql( + &docker, + &node2_id, + "SELECT COUNT(*) FROM recovery_test", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count on recovered replica"); + + println!( + " Recovered replica row count: {} (expected 15000)", + recovered_count + ); + assert_eq!( + recovered_count, "15000", + "Recovered replica should have all 15k rows (10k + 5k during outage)" + ); + + // Verify ongoing replication + println!("\n5. Verifying ongoing replication..."); + exec_sql( + &docker, + &node1_id, + "INSERT INTO recovery_test (value) SELECT 'post-recovery-' || generate_series(1, 2000)", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to insert post-recovery data"); + + tokio::time::sleep(Duration::from_secs(3)).await; + + let final_count = exec_sql( + &docker, + &node2_id, + "SELECT COUNT(*) FROM recovery_test", + TEST_DB, + TEST_USER, + ) + .await + .expect("Failed to count final rows"); + + println!( + " Final replica row count: {} (expected 17000)", + final_count + ); + assert_eq!( + final_count, "17000", + "Replica should have all 17k rows after recovery + continued replication" + ); + + println!("\n=== Member recovery test passed! ===\n"); + + // Cleanup + for id in [&monitor_id, &node1_id, &node2_id] { + let _ = docker + .stop_container( + id, + Some(StopContainerOptions { + t: Some(5), + signal: None, + }), + ) + .await; + let _ = docker + .remove_container( + id, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await; + } + let _ = docker.remove_network(network).await; + } + } +} diff --git a/crates/temps-providers/src/externalsvc/mod.rs b/crates/temps-providers/src/externalsvc/mod.rs index 7c29cd4a..f8fb7f0d 100644 --- a/crates/temps-providers/src/externalsvc/mod.rs +++ b/crates/temps-providers/src/externalsvc/mod.rs @@ -6,6 +6,7 @@ use utoipa::ToSchema; pub mod mongodb; pub mod postgres; +pub mod postgres_cluster; pub mod redis; pub mod rustfs; pub mod s3; @@ -14,6 +15,10 @@ pub mod s3; #[cfg(test)] pub mod test_utils; +// Integration tests for service clusters +#[cfg(test)] +mod cluster_integration_tests; + /// Shared mutex for tests that mutate the DEPLOYMENT_MODE environment variable. /// This must be shared across all test modules (postgres, redis, etc.) because /// env vars are process-global — a module-local mutex doesn't prevent cross-module races. @@ -23,6 +28,7 @@ pub(crate) static DEPLOYMENT_MODE_MUTEX: std::sync::Mutex<()> = std::sync::Mutex // Re-export services for easier access pub use mongodb::MongodbService; pub use postgres::PostgresService; +pub use postgres_cluster::PostgresClusterService; pub use redis::RedisService; pub use rustfs::RustfsService; pub use s3::S3Service; @@ -318,6 +324,39 @@ pub struct AvailableContainer { pub exposed_ports: Vec, } +/// Specification for a cluster member to be created. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClusterMemberSpec { + /// Service-type-specific role (e.g., "monitor", "primary", "replica", "arbiter", "sentinel", "node") + pub role: String, + /// Target worker node ID. None = local (control plane). + pub node_id: Option, + /// Stable ordinal for this member (0, 1, 2, ...) + pub ordinal: i32, + /// WireGuard IP or hostname for inter-member communication + pub hostname: Option, +} + +/// Result from initializing a single cluster member. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClusterMemberResult { + pub ordinal: i32, + pub role: String, + pub container_id: String, + pub container_name: String, + pub port: Option, + pub status: String, +} + +/// Info about an existing cluster member, used for connection string generation. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClusterMemberInfo { + pub role: String, + pub hostname: String, + pub port: i32, + pub status: String, +} + #[async_trait] #[allow(clippy::too_many_arguments)] pub trait ExternalService: Send + Sync { @@ -483,6 +522,54 @@ pub trait ExternalService: Send + Sync { )) } + // ----------------------------------------------------------------------- + // Cluster lifecycle methods (opt-in for service types that support clustering) + // ----------------------------------------------------------------------- + + /// Whether this service type supports cluster topology. + fn supports_cluster(&self) -> bool { + false + } + + /// Valid roles for this service type in cluster mode. + /// Used for validation when creating or modifying cluster members. + fn valid_cluster_roles(&self) -> Vec<&'static str> { + Vec::new() + } + + /// Initialize a cluster with the given member specifications. + /// Members must be created in the returned order (monitor first, then primary, then replicas). + /// + /// Returns a Vec of `ClusterMemberResult` with container details for each member. + async fn init_cluster( + &self, + _config: ServiceConfig, + _members: Vec, + ) -> Result> { + Err(anyhow::anyhow!( + "Cluster mode not supported for service type {}", + self.get_type() + )) + } + + /// Build the connection string for a cluster, given all member addresses. + /// E.g., multi-host libpq for Postgres, replica set URI for MongoDB. + fn cluster_connection_string( + &self, + _members: &[ClusterMemberInfo], + _config: &ServiceConfig, + ) -> Result { + Err(anyhow::anyhow!( + "Cluster connection string not supported for service type {}", + self.get_type() + )) + } + + /// Get the Docker image to use for cluster members (may differ from standalone). 
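// Illustrative sketch (the helper name and error wording are placeholders; only the
// trait methods and structs introduced in this diff are assumed): validating member
// specs against the opt-in cluster hooks before calling `init_cluster`.
fn validate_cluster_request(
    svc: &dyn ExternalService,
    members: &[ClusterMemberSpec],
) -> anyhow::Result<()> {
    if !svc.supports_cluster() {
        anyhow::bail!(
            "service type {} does not support cluster topology",
            svc.get_type()
        );
    }
    let valid = svc.valid_cluster_roles();
    for m in members {
        if !valid.iter().any(|r| m.role == *r) {
            anyhow::bail!("invalid cluster role '{}' (valid roles: {:?})", m.role, valid);
        }
    }
    // pg_auto_failover-style topologies require exactly one monitor member.
    if valid.contains(&"monitor") {
        let monitors = members.iter().filter(|m| m.role == "monitor").count();
        if monitors != 1 {
            anyhow::bail!("expected exactly one monitor member, found {}", monitors);
        }
    }
    Ok(())
}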
+ fn get_cluster_docker_image(&self) -> (String, String) { + self.get_default_docker_image() + } + /// Import an existing running Docker container as a managed service /// User provides container ID and necessary credentials/configuration /// diff --git a/crates/temps-providers/src/externalsvc/postgres.rs b/crates/temps-providers/src/externalsvc/postgres.rs index bdf57cb9..8b0549b1 100644 --- a/crates/temps-providers/src/externalsvc/postgres.rs +++ b/crates/temps-providers/src/externalsvc/postgres.rs @@ -154,7 +154,7 @@ fn default_username() -> String { "postgres".to_string() } -fn generate_password() -> String { +pub fn generate_password() -> String { use rand::{distributions::Alphanumeric, Rng}; rand::thread_rng() .sample_iter(&Alphanumeric) diff --git a/crates/temps-providers/src/externalsvc/postgres_cluster.rs b/crates/temps-providers/src/externalsvc/postgres_cluster.rs new file mode 100644 index 00000000..24355720 --- /dev/null +++ b/crates/temps-providers/src/externalsvc/postgres_cluster.rs @@ -0,0 +1,793 @@ +use anyhow::Result; +use async_trait::async_trait; +use bollard::Docker; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use tracing::info; + +use super::{ + ClusterMemberInfo, ClusterMemberResult, ClusterMemberSpec, ExternalService, RuntimeEnvVar, + ServiceConfig, ServiceType, +}; + +/// Default Docker image for pg_auto_failover cluster nodes. +const DEFAULT_CLUSTER_IMAGE: &str = "gotempsh/postgres-ha:18-bookworm"; + +/// PostgreSQL HA cluster service using pg_auto_failover. +/// +/// Topology: +/// - 1 monitor node (lightweight Postgres instance for orchestration) +/// - 1 primary node +/// - N replica nodes (default: 1) +/// +/// Each member is a separate Docker container that can run on different worker nodes. +/// pg_autoctl handles replication setup, health monitoring, and automatic failover. +pub struct PostgresClusterService { + name: String, + #[allow(dead_code)] + docker: Arc, +} + +/// Configuration for a PostgreSQL HA cluster. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +pub struct PostgresClusterConfig { + /// Database name + #[serde(default = "default_database")] + pub database: String, + /// Database username + #[serde(default = "default_username")] + pub username: String, + /// Database password (auto-generated if not provided) + pub password: Option, + /// Max connections per node + #[serde(default = "default_max_connections")] + pub max_connections: u32, + /// Number of replicas (default: 1) + #[serde(default = "default_replicas")] + pub replicas: u32, + /// Docker image for cluster nodes + pub docker_image: Option, + /// SSL mode between cluster members + #[serde(default = "default_ssl_mode")] + pub ssl_mode: String, +} + +fn default_database() -> String { + "postgres".to_string() +} +fn default_username() -> String { + "postgres".to_string() +} +fn default_max_connections() -> u32 { + 100 +} +fn default_replicas() -> u32 { + 1 +} +fn default_ssl_mode() -> String { + "prefer".to_string() +} + +impl PostgresClusterService { + pub fn new(name: String, docker: Arc) -> Self { + Self { name, docker } + } + + /// Container name for the monitor member. + fn monitor_container_name(&self) -> String { + format!("postgres-{}-monitor", self.name) + } + + /// Container name for a data node member by ordinal. + fn node_container_name(&self, ordinal: i32) -> String { + format!("postgres-{}-{}", self.name, ordinal) + } + + /// Parse cluster config from ServiceConfig parameters. 
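// Illustrative sketch (values are placeholders): the `parameters` JSON that
// `parse_config` below expects for an HA cluster. Omitted fields fall back to the serde
// defaults declared above (database "postgres", user "postgres", 100 max connections,
// 1 replica, ssl_mode "prefer").
fn example_cluster_parameters() -> serde_json::Value {
    serde_json::json!({
        "database": "myapp",
        "username": "admin",
        "password": "change-me", // optional: a password is generated when omitted
        "replicas": 2,           // topology becomes 1 monitor + 1 primary + 2 replicas
        "ssl_mode": "prefer"
    })
}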
+ fn parse_config(config: &ServiceConfig) -> Result { + let cluster_config: PostgresClusterConfig = + serde_json::from_value(config.parameters.clone()) + .map_err(|e| anyhow::anyhow!("Invalid cluster config: {}", e))?; + Ok(cluster_config) + } + + /// Build environment variables for the monitor container. + /// + /// `monitor_hostname` is the address the monitor advertises to data nodes. + /// For remote workers this is the WireGuard/private IP; for local it is the container name. + /// `monitor_port` is the port the monitor listens on (inside the container). + fn monitor_env(&self, monitor_hostname: &str, monitor_port: u16) -> HashMap { + let mut env = HashMap::new(); + env.insert("MONITOR_HOSTNAME".to_string(), monitor_hostname.to_string()); + env.insert("MONITOR_PORT".to_string(), monitor_port.to_string()); + env + } + + /// Build environment variables for a data node container. + /// + /// `monitor_port` is the port the monitor listens on (the mapped host port + /// when using bridge networking, or the container port with host networking). + /// `node_port` is the port this node will listen on. + fn node_env( + &self, + config: &PostgresClusterConfig, + monitor_hostname: &str, + monitor_port: u16, + node_hostname: &str, + node_port: u16, + node_name: &str, + ) -> HashMap { + let mut env = HashMap::new(); + env.insert("NODE_HOSTNAME".to_string(), node_hostname.to_string()); + env.insert("NODE_PORT".to_string(), node_port.to_string()); + env.insert("NODE_NAME".to_string(), node_name.to_string()); + env.insert( + "MONITOR_URI".to_string(), + format!( + "postgresql://autoctl_node@{}:{}/pg_auto_failover", + monitor_hostname, monitor_port + ), + ); + env.insert("POSTGRES_USER".to_string(), config.username.clone()); + env.insert( + "POSTGRES_PASSWORD".to_string(), + config + .password + .clone() + .unwrap_or_else(super::postgres::generate_password), + ); + env.insert("POSTGRES_DB".to_string(), config.database.clone()); + env + } + + /// Build the startup command for the monitor container. + /// + /// The hostname is passed via the `MONITOR_HOSTNAME` environment variable + /// so that it can be set to the worker node's WireGuard/private address + /// when the monitor runs on a remote node. + fn monitor_command(&self) -> Vec { + // The entrypoint script handles: + // 1. pg_autoctl create monitor (if not initialized) + // 2. Remove stale pidfile (prevents "already running with PID 1" on restart) + // 3. pg_autoctl run + // + // Runs as the `postgres` user because pg_ctl refuses to run as root. + vec![ + "bash".to_string(), + "-c".to_string(), + [ + "PGDATA=/var/lib/postgresql/monitor", + "chown -R postgres:postgres /var/lib/postgresql", + "if [ ! -f \"$PGDATA/pg_autoctl.cfg\" ]; then", + " gosu postgres pg_autoctl create monitor \\", + " --pgdata \"$PGDATA\" \\", + " --pgport \"$MONITOR_PORT\" \\", + " --hostname \"$MONITOR_HOSTNAME\" \\", + " --auth trust \\", + " --ssl-self-signed;", + "fi", + // After creation (or on restart), ensure pg_hba.conf allows + // autoctl_node connections via trust over the network. + // --ssl-self-signed sets cert auth for SSL connections, but + // data nodes need trust for the initial registration handshake + // before pg_autoctl has issued them client certificates. + "HBA=\"$PGDATA/pg_hba.conf\"", + "if ! 
grep -q 'autoctl_node.*0\\.0\\.0\\.0/0' \"$HBA\" 2>/dev/null; then", + " echo 'hostssl pg_auto_failover autoctl_node 0.0.0.0/0 trust' >> \"$HBA\"", + " echo 'hostssl pg_auto_failover autoctl_node ::/0 trust' >> \"$HBA\"", + " gosu postgres pg_ctl reload -D \"$PGDATA\" 2>/dev/null || true", + "fi", + "rm -f /tmp/pg_autoctl/*.pid /tmp/pg_autoctl/*/*.pid", + "exec gosu postgres pg_autoctl run --pgdata \"$PGDATA\"", + ] + .join("\n"), + ] + } + + /// Build the startup command for a data node container. + fn node_command(&self) -> Vec { + // The entrypoint script handles: + // 1. Launch a background HBA patcher that waits for pg_hba.conf to appear + // and immediately adds trust entries for replication connections. + // This MUST run concurrently with pg_autoctl create because the FSM + // transition (primary → catchingup) happens inside `create` before + // the command returns — sequential patching is too late. + // 2. pg_autoctl create postgres (if not initialized) — connects to monitor + // 3. Remove stale pidfile (prevents "already running with PID 1" on restart) + // 4. pg_autoctl run — keeps running, handles replication and failover + // + // Runs as the `postgres` user because pg_ctl refuses to run as root. + vec![ + "bash".to_string(), + "-c".to_string(), + [ + "PGDATA=/var/lib/postgresql/pgdata", + "chown -R postgres:postgres /var/lib/postgresql", + // Background HBA patcher: polls for pg_hba.conf and patches it + // as soon as it exists. Needed because --ssl-self-signed sets + // cert auth, but remote cluster members need trust auth for + // pgautofailover_replicator (replication) and autoctl_node + // (monitor communication) before certificates are exchanged. + "(", + " while true; do", + " HBA=\"$PGDATA/pg_hba.conf\"", + " if [ -f \"$HBA\" ]; then", + " if ! grep -q 'pgautofailover_replicator.*0\\.0\\.0\\.0/0' \"$HBA\" 2>/dev/null; then", + " echo 'hostssl replication pgautofailover_replicator 0.0.0.0/0 trust' >> \"$HBA\"", + " echo 'hostssl replication pgautofailover_replicator ::/0 trust' >> \"$HBA\"", + " echo 'host replication pgautofailover_replicator 0.0.0.0/0 trust' >> \"$HBA\"", + " echo 'host replication pgautofailover_replicator ::/0 trust' >> \"$HBA\"", + " echo 'hostssl all pgautofailover_replicator 0.0.0.0/0 trust' >> \"$HBA\"", + " echo 'hostssl all pgautofailover_replicator ::/0 trust' >> \"$HBA\"", + " echo 'host all pgautofailover_replicator 0.0.0.0/0 trust' >> \"$HBA\"", + " echo 'host all pgautofailover_replicator ::/0 trust' >> \"$HBA\"", + " gosu postgres pg_ctl reload -D \"$PGDATA\" 2>/dev/null || true", + " fi", + " break", + " fi", + " sleep 0.5", + " done", + ") &", + "if [ ! 
-f \"$PGDATA/pg_autoctl.cfg\" ]; then", + " gosu postgres pg_autoctl create postgres \\", + " --pgdata \"$PGDATA\" \\", + " --pgport \"$NODE_PORT\" \\", + " --hostname \"$NODE_HOSTNAME\" \\", + " --name \"$NODE_NAME\" \\", + " --auth trust \\", + " --ssl-self-signed \\", + " --monitor \"$MONITOR_URI\";", + "fi", + "rm -f /tmp/pg_autoctl/*.pid /tmp/pg_autoctl/*/*.pid", + "exec gosu postgres pg_autoctl run --pgdata \"$PGDATA\"", + ] + .join("\n"), + ] + } +} + +#[async_trait] +impl ExternalService for PostgresClusterService { + async fn init(&self, _config: ServiceConfig) -> Result> { + // Cluster services use init_cluster instead + Err(anyhow::anyhow!( + "Use init_cluster for PostgresClusterService — standalone init not supported" + )) + } + + async fn health_check(&self) -> Result { + // Cluster health is checked per-member by the ExternalServiceManager + Ok(true) + } + + fn get_type(&self) -> ServiceType { + ServiceType::Postgres + } + + fn get_name(&self) -> String { + format!("postgres-cluster-{}", self.name) + } + + fn get_connection_info(&self) -> Result { + // Connection info is generated from cluster members by the manager + Ok(format!( + "postgres-cluster-{} (use cluster endpoint)", + self.name + )) + } + + async fn cleanup(&self) -> Result<()> { + Ok(()) + } + + fn get_parameter_schema(&self) -> Option { + let schema = schemars::schema_for!(PostgresClusterConfig); + serde_json::to_value(schema).ok() + } + + async fn start(&self) -> Result<()> { + // Cluster start is managed per-member + Ok(()) + } + + async fn stop(&self) -> Result<()> { + // Cluster stop is managed per-member + Ok(()) + } + + async fn remove(&self) -> Result<()> { + // Cluster removal is managed per-member + Ok(()) + } + + fn get_environment_variables( + &self, + parameters: &HashMap, + ) -> Result> { + let mut env = HashMap::new(); + let user = parameters.get("username").cloned().unwrap_or_default(); + let password = parameters.get("password").cloned().unwrap_or_default(); + let database = parameters.get("database").cloned().unwrap_or_default(); + + // For clusters, connection info includes all data node hosts + env.insert("POSTGRES_USER".to_string(), user); + env.insert("POSTGRES_PASSWORD".to_string(), password); + env.insert("POSTGRES_DATABASE".to_string(), database); + + Ok(env) + } + + fn get_docker_environment_variables( + &self, + parameters: &HashMap, + ) -> Result> { + self.get_environment_variables(parameters) + } + + fn get_runtime_env_definitions(&self) -> Vec { + vec![ + RuntimeEnvVar { + name: "POSTGRES_URL".to_string(), + description: "Multi-host PostgreSQL connection string with failover support" + .to_string(), + example: "postgresql://user:pass@host1:5432,host2:5432/db?target_session_attrs=read-write".to_string(), + sensitive: true, + }, + RuntimeEnvVar { + name: "POSTGRES_HOST".to_string(), + description: "Comma-separated list of PostgreSQL cluster hosts".to_string(), + example: "host1,host2".to_string(), + sensitive: false, + }, + RuntimeEnvVar { + name: "POSTGRES_PORT".to_string(), + description: "PostgreSQL port".to_string(), + example: "5432".to_string(), + sensitive: false, + }, + ] + } + + fn get_local_address(&self, _service_config: ServiceConfig) -> Result { + Ok("localhost:5432".to_string()) + } + + fn get_effective_address(&self, _service_config: ServiceConfig) -> Result<(String, String)> { + // For clusters, the effective address is the primary — but this is dynamic + Ok((self.monitor_container_name(), "5432".to_string())) + } + + fn get_docker_container_name(&self) -> String { + 
self.monitor_container_name() + } + + fn get_docker_internal_port(&self) -> String { + "5432".to_string() + } + + // ----------------------------------------------------------------------- + // Cluster-specific methods + // ----------------------------------------------------------------------- + + fn supports_cluster(&self) -> bool { + true + } + + fn valid_cluster_roles(&self) -> Vec<&'static str> { + vec!["monitor", "primary", "replica"] + } + + async fn init_cluster( + &self, + config: ServiceConfig, + members: Vec, + ) -> Result> { + let _cluster_config = Self::parse_config(&config)?; + // Always use the HA image for cluster members — the standalone + // postgres-walg image does not contain pg_auto_failover / pg_autoctl. + let image = DEFAULT_CLUSTER_IMAGE; + + info!( + "Initializing PostgreSQL HA cluster '{}' with {} members (image: {})", + self.name, + members.len(), + image + ); + + let mut results = Vec::new(); + + // Find the monitor member — must be initialized first + let monitor = members + .iter() + .find(|m| m.role == "monitor") + .ok_or_else(|| anyhow::anyhow!("Cluster must have exactly one monitor member"))?; + + let _monitor_hostname = monitor + .hostname + .as_deref() + .unwrap_or(&self.monitor_container_name()); + + // Create monitor container + let monitor_container_name = self.monitor_container_name(); + info!("Creating monitor container: {}", monitor_container_name); + + let monitor_result = ClusterMemberResult { + ordinal: monitor.ordinal, + role: "monitor".to_string(), + container_id: String::new(), // Filled by the manager after remote/local creation + container_name: monitor_container_name.clone(), + port: Some(5432), + status: "provisioning".to_string(), + }; + results.push(monitor_result); + + // Create data node containers (primary first, then replicas) + // pg_auto_failover automatically assigns primary to the first registered node + let mut data_nodes: Vec<&ClusterMemberSpec> = + members.iter().filter(|m| m.role != "monitor").collect(); + // Sort: primary first, then replicas by ordinal + data_nodes.sort_by(|a, b| { + let a_is_primary = if a.role == "primary" { 0 } else { 1 }; + let b_is_primary = if b.role == "primary" { 0 } else { 1 }; + a_is_primary + .cmp(&b_is_primary) + .then(a.ordinal.cmp(&b.ordinal)) + }); + + for node in &data_nodes { + let container_name = self.node_container_name(node.ordinal); + info!( + "Creating data node container: {} (role: {}, ordinal: {})", + container_name, node.role, node.ordinal + ); + + let node_result = ClusterMemberResult { + ordinal: node.ordinal, + role: node.role.clone(), + container_id: String::new(), + container_name, + port: Some(5432), + status: "provisioning".to_string(), + }; + results.push(node_result); + } + + Ok(results) + } + + fn cluster_connection_string( + &self, + members: &[ClusterMemberInfo], + config: &ServiceConfig, + ) -> Result { + let cluster_config = Self::parse_config(config)?; + + // Build multi-host libpq connection string + // Only include data nodes (not monitor) in the connection string + let data_nodes: Vec<&ClusterMemberInfo> = members + .iter() + .filter(|m| m.role != "monitor" && m.status == "running") + .collect(); + + if data_nodes.is_empty() { + return Err(anyhow::anyhow!("No running data nodes in cluster")); + } + + let hosts: Vec = data_nodes + .iter() + .map(|n| format!("{}:{}", n.hostname, n.port)) + .collect(); + + let password = cluster_config.password.unwrap_or_default(); + let encoded_password = urlencoding::encode(&password); + + // Multi-host connection string with 
target_session_attrs for failover + let connection_string = format!( + "postgresql://{}:{}@{}/{}?target_session_attrs=read-write", + cluster_config.username, + encoded_password, + hosts.join(","), + cluster_config.database, + ); + + Ok(connection_string) + } + + fn get_cluster_docker_image(&self) -> (String, String) { + (DEFAULT_CLUSTER_IMAGE.to_string(), "18-bookworm".to_string()) + } +} + +/// Build `RemoteServiceCreateParams`-compatible data for a cluster member. +/// This is called by `ExternalServiceManager` when dispatching member creation +/// to remote worker nodes via the agent API. +pub struct ClusterMemberCreateParams { + pub container_name: String, + pub image: String, + pub environment: HashMap, + pub command: Option>, + pub container_port: u16, + pub volume_path: String, +} + +impl PostgresClusterService { + /// Build creation parameters for a specific cluster member. + /// + /// * `monitor_hostname` — address the monitor advertises (host IP or container name) + /// * `monitor_port` — port the monitor listens on (the host-mapped port) + /// * `member_port` — port this member will listen on inside its container + /// + /// The manager uses these to create containers locally or via the agent. + pub fn build_member_params( + &self, + member: &ClusterMemberSpec, + config: &PostgresClusterConfig, + monitor_hostname: &str, + monitor_port: u16, + member_port: u16, + ) -> ClusterMemberCreateParams { + match member.role.as_str() { + "monitor" => ClusterMemberCreateParams { + container_name: self.monitor_container_name(), + // Always use the HA image — parameter_strategies may fill in the + // standalone postgres-walg image which lacks pg_autoctl. + image: DEFAULT_CLUSTER_IMAGE.to_string(), + environment: self.monitor_env(monitor_hostname, member_port), + command: Some(self.monitor_command()), + container_port: member_port, + volume_path: "/var/lib/postgresql".to_string(), + }, + _ => { + // primary or replica — same setup, pg_auto_failover assigns roles + let fallback_hostname = self.node_container_name(member.ordinal); + let node_hostname = member.hostname.as_deref().unwrap_or(&fallback_hostname); + let node_name = format!("node-{}", member.ordinal); + + ClusterMemberCreateParams { + container_name: self.node_container_name(member.ordinal), + // Always use the HA image — see monitor comment above. 
+ image: DEFAULT_CLUSTER_IMAGE.to_string(), + environment: self.node_env( + config, + monitor_hostname, + monitor_port, + node_hostname, + member_port, + &node_name, + ), + command: Some(self.node_command()), + container_port: member_port, + volume_path: "/var/lib/postgresql".to_string(), + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_container_naming() { + let service = PostgresClusterService::new( + "my-db".to_string(), + Arc::new(Docker::connect_with_defaults().unwrap_or_else(|_| { + // Fallback for tests without Docker + Docker::connect_with_local_defaults().unwrap() + })), + ); + + assert_eq!(service.monitor_container_name(), "postgres-my-db-monitor"); + assert_eq!(service.node_container_name(1), "postgres-my-db-1"); + assert_eq!(service.node_container_name(2), "postgres-my-db-2"); + } + + #[test] + fn test_valid_cluster_roles() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + assert!(service.supports_cluster()); + assert_eq!( + service.valid_cluster_roles(), + vec!["monitor", "primary", "replica"] + ); + } + + #[test] + fn test_parse_cluster_config_defaults() { + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({}), + }; + let cluster_config = PostgresClusterService::parse_config(&config).unwrap(); + assert_eq!(cluster_config.database, "postgres"); + assert_eq!(cluster_config.username, "postgres"); + assert_eq!(cluster_config.max_connections, 100); + assert_eq!(cluster_config.replicas, 1); + } + + #[test] + fn test_parse_cluster_config_custom() { + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({ + "database": "myapp", + "username": "admin", + "password": "secret123", + "replicas": 2, + "max_connections": 200 + }), + }; + let cluster_config = PostgresClusterService::parse_config(&config).unwrap(); + assert_eq!(cluster_config.database, "myapp"); + assert_eq!(cluster_config.username, "admin"); + assert_eq!(cluster_config.password, Some("secret123".to_string())); + assert_eq!(cluster_config.replicas, 2); + assert_eq!(cluster_config.max_connections, 200); + } + + #[test] + fn test_cluster_connection_string() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + + let members = vec![ + ClusterMemberInfo { + role: "monitor".to_string(), + hostname: "10.100.0.1".to_string(), + port: 5432, + status: "running".to_string(), + }, + ClusterMemberInfo { + role: "primary".to_string(), + hostname: "10.100.0.2".to_string(), + port: 5432, + status: "running".to_string(), + }, + ClusterMemberInfo { + role: "replica".to_string(), + hostname: "10.100.0.3".to_string(), + port: 5432, + status: "running".to_string(), + }, + ]; + + let config = ServiceConfig { + name: "test".to_string(), + service_type: ServiceType::Postgres, + version: None, + parameters: serde_json::json!({ + "database": "myapp", + "username": "admin", + "password": "secret" + }), + }; + + let conn_str = service + .cluster_connection_string(&members, &config) + .unwrap(); + + // Monitor should NOT be in the connection string + assert!(!conn_str.contains("10.100.0.1")); + // Both data nodes should be present + 
assert!(conn_str.contains("10.100.0.2:5432")); + assert!(conn_str.contains("10.100.0.3:5432")); + // Should have multi-host format with failover + assert!(conn_str.contains("target_session_attrs=read-write")); + assert!(conn_str.starts_with("postgresql://admin:secret@")); + } + + #[test] + fn test_monitor_command_contains_ssl() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + let cmd = service.monitor_command(); + let script = &cmd[2]; + assert!(script.contains("gosu postgres pg_autoctl create monitor")); + assert!(script.contains("--ssl-self-signed")); + assert!(script.contains("--pgport \"$MONITOR_PORT\"")); + assert!(script.contains("gosu postgres pg_autoctl run")); + assert!(script.contains("$MONITOR_HOSTNAME")); + assert!(script.contains("chown -R postgres:postgres")); + // Must patch pg_hba.conf to allow autoctl_node trust auth for node registration + assert!(script.contains("autoctl_node.*trust")); + assert!(script.contains("hostssl pg_auto_failover autoctl_node 0.0.0.0/0 trust")); + } + + #[test] + fn test_node_command_contains_monitor_uri() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("test".to_string(), Arc::new(docker)); + let cmd = service.node_command(); + let script = &cmd[2]; + assert!(script.contains("gosu postgres pg_autoctl create postgres")); + assert!(script.contains("--pgport \"$NODE_PORT\"")); + assert!(script.contains("$MONITOR_URI")); + assert!(script.contains("$NODE_HOSTNAME")); + assert!(script.contains("--ssl-self-signed")); + assert!(script.contains("chown -R postgres:postgres")); + } + + #[test] + fn test_build_member_params_monitor() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("my-db".to_string(), Arc::new(docker)); + let config = PostgresClusterConfig { + database: "postgres".to_string(), + username: "postgres".to_string(), + password: Some("pass".to_string()), + max_connections: 100, + replicas: 1, + docker_image: None, + ssl_mode: "prefer".to_string(), + }; + + let spec = ClusterMemberSpec { + role: "monitor".to_string(), + node_id: Some(1), + ordinal: 0, + hostname: Some("10.100.0.1".to_string()), + }; + + let params = service.build_member_params(&spec, &config, "10.100.0.1", 6100, 6100); + assert_eq!(params.container_name, "postgres-my-db-monitor"); + assert_eq!(params.container_port, 6100); + assert_eq!(params.image, DEFAULT_CLUSTER_IMAGE); + // Monitor env should contain the hostname and port for pg_autoctl advertisement + assert_eq!( + params.environment.get("MONITOR_HOSTNAME").unwrap(), + "10.100.0.1" + ); + assert_eq!(params.environment.get("MONITOR_PORT").unwrap(), "6100"); + } + + #[test] + fn test_build_member_params_data_node() { + let docker = Docker::connect_with_defaults() + .unwrap_or_else(|_| Docker::connect_with_local_defaults().unwrap()); + let service = PostgresClusterService::new("my-db".to_string(), Arc::new(docker)); + let config = PostgresClusterConfig { + database: "myapp".to_string(), + username: "admin".to_string(), + password: Some("secret".to_string()), + max_connections: 200, + replicas: 1, + docker_image: None, + ssl_mode: "prefer".to_string(), + }; + + let spec = ClusterMemberSpec { + role: "primary".to_string(), + node_id: Some(2), + 
ordinal: 1, + hostname: Some("10.100.0.2".to_string()), + }; + + let params = service.build_member_params(&spec, &config, "10.100.0.1", 6100, 6101); + assert_eq!(params.container_name, "postgres-my-db-1"); + assert_eq!(params.container_port, 6101); + assert_eq!( + params.environment.get("MONITOR_URI").unwrap(), + "postgresql://autoctl_node@10.100.0.1:6100/pg_auto_failover" + ); + assert_eq!( + params.environment.get("NODE_HOSTNAME").unwrap(), + "10.100.0.2" + ); + assert_eq!(params.environment.get("NODE_PORT").unwrap(), "6101"); + assert_eq!(params.environment.get("POSTGRES_USER").unwrap(), "admin"); + assert_eq!(params.environment.get("POSTGRES_DB").unwrap(), "myapp"); + } +} diff --git a/crates/temps-providers/src/externalsvc/test_utils.rs b/crates/temps-providers/src/externalsvc/test_utils.rs index 2edbcea9..d5e4ec7e 100644 --- a/crates/temps-providers/src/externalsvc/test_utils.rs +++ b/crates/temps-providers/src/externalsvc/test_utils.rs @@ -319,6 +319,8 @@ pub fn create_mock_external_service( slug: None, config: Some(serde_json::json!({}).to_string()), node_id: None, + topology: "standalone".to_string(), + error_message: None, } } diff --git a/crates/temps-providers/src/handlers/handlers.rs b/crates/temps-providers/src/handlers/handlers.rs index 37877607..f50e006b 100644 --- a/crates/temps-providers/src/handlers/handlers.rs +++ b/crates/temps-providers/src/handlers/handlers.rs @@ -25,8 +25,8 @@ use super::audit::{ use crate::handlers::types::{ AvailableContainerInfo, CreateExternalServiceRequest, EnvironmentVariableInfo, ExternalServiceDetails, ExternalServiceInfo, ImportExternalServiceRequest, LinkServiceRequest, - ProjectServiceInfo, ProviderMetadata, ServiceParameter, ServiceTypeInfo, ServiceTypeRoute, - UpdateExternalServiceRequest, UpgradeExternalServiceRequest, + ProjectServiceInfo, ProviderMetadata, RetryClusterRequest, ServiceParameter, ServiceTypeInfo, + ServiceTypeRoute, UpdateExternalServiceRequest, UpgradeExternalServiceRequest, }; use crate::services::EnvironmentVariableOptions; use temps_core::AuditContext; @@ -241,6 +241,7 @@ pub fn configure_routes() -> Router> { .route("/external-services/{id}/health", get(check_health)) .route("/external-services/{id}/start", post(start_service)) .route("/external-services/{id}/stop", post(stop_service)) + .route("/external-services/{id}/retry", post(retry_cluster)) .route("/external-services/{id}/upgrade", post(upgrade_service)) .route( "/external-services/{id}/projects", @@ -419,6 +420,16 @@ async fn create_service( service_type: request.service_type.into(), version: request.version.clone(), parameters: request.parameters, + node_id: request.node_id, + topology: request.topology, + members: request + .members + .into_iter() + .map(|m| crate::services::ClusterMemberRequest { + role: m.role, + node_id: m.node_id, + }) + .collect(), }; match app_state @@ -785,6 +796,83 @@ async fn start_service( } } +/// Retry a failed cluster service initialization. +/// +/// Cleans up any leftover containers from the previous attempt and +/// re-runs cluster initialization with the provided member specifications. 
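+///
+/// Illustratively (the roles and node IDs below are an example, not a requirement),
+/// a retry request for a three-member Postgres cluster might carry
+/// `{"members": [{"role": "monitor"}, {"role": "primary", "node_id": 2}, {"role": "replica", "node_id": 3}]}`;
+/// omitting `members` falls back to the configuration preserved in `service_members`.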
+#[utoipa::path( + post, + path = "/external-services/{id}/retry", + tag = "External Services", + request_body = RetryClusterRequest, + responses( + (status = 200, description = "Cluster retry initiated", body = ExternalServiceInfo), + (status = 400, description = "Service is not a failed cluster"), + (status = 404, description = "Service not found"), + (status = 500, description = "Internal server error") + ), + security(("bearer_auth" = [])) +)] +async fn retry_cluster( + State(app_state): State>, + Path(id): Path, + RequireAuth(auth): RequireAuth, + Extension(metadata): Extension, + Json(request): Json, +) -> Result { + permission_guard!(auth, ExternalServicesWrite); + + let members: Vec = request + .members + .into_iter() + .map(|m| crate::services::ClusterMemberRequest { + role: m.role, + node_id: m.node_id, + }) + .collect(); + + match app_state + .external_service_manager + .retry_cluster(id, &members) + .await + { + Ok(service) => { + let audit = ExternalServiceStatusChangedAudit { + context: AuditContext { + user_id: auth.user_id(), + ip_address: Some(metadata.ip_address.clone()), + user_agent: metadata.user_agent.clone(), + }, + service_id: service.id, + name: service.name.clone(), + service_type: service.service_type.to_string(), + new_status: "retry".to_string(), + }; + + if let Err(e) = app_state.audit_service.create_audit_log(&audit).await { + error!("Failed to create audit log: {}", e); + } + + Ok((StatusCode::OK, Json(service))) + } + Err(e) => { + error!("Failed to retry cluster service {}: {}", id, e); + let msg = e.to_string(); + if msg.contains("not found") { + Err(not_found() + .detail(format!("Service {} not found", id)) + .build()) + } else if msg.contains("only valid for") || msg.contains("must be in") { + Err(bad_request().detail(msg).build()) + } else { + Err(internal_server_error() + .detail(format!("Failed to retry cluster: {}", e)) + .build()) + } + } + } +} + /// Stop an external service #[utoipa::path( post, @@ -1297,6 +1385,7 @@ async fn get_service_preview_environment_variables_masked( delete_service, start_service, stop_service, + retry_cluster, link_service_to_project, unlink_service_from_project, list_service_projects, @@ -1326,6 +1415,7 @@ async fn get_service_preview_environment_variables_masked( CreateExternalServiceRequest, UpdateExternalServiceRequest, UpgradeExternalServiceRequest, + RetryClusterRequest, ImportExternalServiceRequest, AvailableContainerInfo, LinkServiceRequest, diff --git a/crates/temps-providers/src/handlers/types.rs b/crates/temps-providers/src/handlers/types.rs index 3685a25c..88faa5d8 100644 --- a/crates/temps-providers/src/handlers/types.rs +++ b/crates/temps-providers/src/handlers/types.rs @@ -166,6 +166,34 @@ pub struct ExternalServiceInfo { pub connection_info: Option, pub created_at: String, pub updated_at: String, + /// Node ID where the service runs. Null means control plane (local). + #[serde(skip_serializing_if = "Option::is_none")] + pub node_id: Option, + /// Service topology: "standalone" (single container) or "cluster" (HA multi-member). + #[schema(example = "standalone")] + pub topology: String, + /// Cluster members (empty for standalone services). + #[serde(skip_serializing_if = "Vec::is_empty")] + pub members: Vec, + /// Error message from failed initialization. + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, +} + +/// Public info about a cluster member. 
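+/// Mirrors `crate::services::ServiceMemberInfo` from the service layer; these
+/// entries are serialized into `ExternalServiceInfo::members` for cluster
+/// services, and `node_id` is `null` for members running on the control plane.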
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct ServiceMemberInfo { + pub id: i32, + pub role: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub node_id: Option, + pub container_name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub hostname: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + pub status: String, + pub ordinal: i32, } #[derive(Serialize, Deserialize, ToSchema)] @@ -254,6 +282,41 @@ pub struct CreateExternalServiceRequest { pub service_type: ServiceTypeRoute, pub version: Option, pub parameters: HashMap, + /// Target node ID for the service. Omit or null to run on the control plane. + #[serde(default)] + pub node_id: Option, + /// Service topology: "standalone" (default) or "cluster" (HA multi-member). + #[serde(default = "default_topology")] + #[schema(example = "standalone")] + pub topology: String, + /// Cluster member specifications. Required when topology is "cluster". + #[serde(default)] + pub members: Vec, +} + +fn default_topology() -> String { + "standalone".to_string() +} + +/// Request spec for a single cluster member. +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct ClusterMemberRequest { + /// Service-type-specific role (e.g., "monitor", "primary", "replica") + #[schema(example = "primary")] + pub role: String, + /// Target worker node ID. Omit or null to run on the control plane. + #[serde(default)] + pub node_id: Option, +} + +/// Request body for retrying a failed cluster initialization. +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct RetryClusterRequest { + /// Cluster member specifications (same format as create). + /// If omitted, the original member configuration is reconstructed from + /// the preserved service_members records. + #[serde(default)] + pub members: Vec, } #[derive(Debug, Serialize, Deserialize, ToSchema)] diff --git a/crates/temps-providers/src/lib.rs b/crates/temps-providers/src/lib.rs index 6037326b..9a089226 100644 --- a/crates/temps-providers/src/lib.rs +++ b/crates/temps-providers/src/lib.rs @@ -3,6 +3,7 @@ pub mod externalsvc; pub mod parameter_strategies; pub mod query_service; +pub mod remote_service_client; pub mod services; pub use services::*; pub mod plugin; diff --git a/crates/temps-providers/src/query_service.rs b/crates/temps-providers/src/query_service.rs index 40588037..ad09fdc4 100644 --- a/crates/temps-providers/src/query_service.rs +++ b/crates/temps-providers/src/query_service.rs @@ -81,18 +81,46 @@ impl QueryService { )) })?; - // Parse port from string - let port = config - .port - .unwrap_or_else(|| "5432".to_string()) - .parse::() - .map_err(|e| { - DataError::InvalidConfiguration(format!("Invalid port number: {}", e)) - })?; + // For cluster services, resolve the primary data node's address + // instead of using the stored config host/port (which may point to + // the monitor or use default values). 
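+        // Resolution sketch: `get_cluster_primary_address` returns Some((host, port))
+        // for a cluster with a running primary, and Ok(None) for standalone
+        // services, in which case we fall back to the stored config host and
+        // port (defaulting to "5432").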
+ let (host, port) = match self + .external_service_manager + .get_cluster_primary_address(service_id) + .await + { + Ok(Some((primary_host, primary_port))) => { + debug!( + "Using cluster primary {}:{} for service {} explorer", + primary_host, primary_port, service_id + ); + (primary_host, primary_port) + } + Ok(None) => { + // Standalone service — use config host/port as before + let port = config + .port + .unwrap_or_else(|| "5432".to_string()) + .parse::() + .map_err(|e| { + DataError::InvalidConfiguration(format!( + "Invalid port number: {}", + e + )) + })?; + (config.host.clone(), port) + } + Err(e) => { + return Err(DataError::ConnectionFailed(format!( + "Failed to resolve cluster primary for service {}: {}", + service_id, e + ))); + } + }; // Connect to the specified database (not the configured one) let pg_source = PostgresSource::connect( - &config.host, + &host, port, &config.username, &config.password.unwrap_or_default(), diff --git a/crates/temps-providers/src/remote_service_client.rs b/crates/temps-providers/src/remote_service_client.rs new file mode 100644 index 00000000..df436987 --- /dev/null +++ b/crates/temps-providers/src/remote_service_client.rs @@ -0,0 +1,382 @@ +//! HTTP client for calling the agent's service management API on remote nodes. +//! +//! Used by `ExternalServiceManager` to route service operations (create, start, +//! stop, remove) through a worker node's agent when `node_id` is set. + +use serde::Deserialize; +use std::collections::HashMap; +use std::time::Duration; +use tracing::{error, info}; + +use crate::services::ExternalServiceError; + +/// Response envelope from the agent API (mirrors `AgentResponse` in temps-agent). +#[derive(Deserialize)] +struct AgentResponse { + success: bool, + data: Option, + error: Option, +} + +/// Lightweight client for the agent's service endpoints. +pub struct RemoteServiceClient { + agent_url: String, + token: String, + node_name: String, + client: reqwest::Client, +} + +/// Parameters needed to create a service on a remote node. +#[derive(Debug, serde::Serialize)] +pub struct RemoteServiceCreateParams { + pub name: String, + pub service_type: String, + pub image: String, + pub environment: HashMap, + pub port_mappings: Vec, + pub volumes: HashMap, + #[serde(skip_serializing_if = "Option::is_none")] + pub network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub command: Option>, +} + +#[derive(Debug, serde::Serialize)] +pub struct RemotePortMapping { + /// Host port to bind. `0` means let Docker auto-assign a free port. + pub host_port: u16, + pub container_port: u16, +} + +/// Response after creating a service on the agent. +#[derive(Debug, Deserialize)] +pub struct RemoteServiceCreateResponse { + pub container_id: String, + pub container_name: String, + pub host_port: u16, +} + +/// Status of a service on a remote node. +#[derive(Debug, Deserialize)] +pub struct RemoteServiceStatus { + pub container_name: String, + pub container_id: Option, + pub running: bool, + pub health: Option, +} + +impl RemoteServiceClient { + /// Create a new client for the given agent. + /// + /// * `agent_url` — base URL, e.g. 
`https://10.100.0.2:3100` + /// * `token` — plaintext bearer token for auth + /// * `node_name` — human-readable name (for error messages) + pub fn new( + agent_url: String, + token: String, + node_name: String, + ) -> Result { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(300)) + .danger_accept_invalid_certs(true) + .build() + .map_err(|e| ExternalServiceError::InternalError { + reason: format!("Failed to create HTTP client for node {}: {}", node_name, e), + })?; + + Ok(Self { + agent_url, + token, + node_name, + client, + }) + } + + /// Create and start a service container on the remote node. + pub async fn create_service( + &self, + params: RemoteServiceCreateParams, + ) -> Result { + info!( + "Creating service '{}' on remote node '{}'", + params.name, self.node_name + ); + self.agent_post("/agent/services", ¶ms).await + } + + /// Start an existing service container on the remote node. + pub async fn start_service(&self, container_name: &str) -> Result<(), ExternalServiceError> { + info!( + "Starting service '{}' on remote node '{}'", + container_name, self.node_name + ); + let _: serde_json::Value = self + .agent_post_no_body(&format!("/agent/services/{}/start", container_name)) + .await?; + Ok(()) + } + + /// Stop a service container on the remote node. + pub async fn stop_service(&self, container_name: &str) -> Result<(), ExternalServiceError> { + info!( + "Stopping service '{}' on remote node '{}'", + container_name, self.node_name + ); + let _: serde_json::Value = self + .agent_post_no_body(&format!("/agent/services/{}/stop", container_name)) + .await?; + Ok(()) + } + + /// Remove a service container (and its volumes) on the remote node. + pub async fn remove_service(&self, container_name: &str) -> Result<(), ExternalServiceError> { + info!( + "Removing service '{}' on remote node '{}'", + container_name, self.node_name + ); + self.agent_delete(&format!("/agent/services/{}", container_name)) + .await + } + + /// Get the status of a service on the remote node. 
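+    ///
+    /// Minimal usage sketch, assuming an async context; the URL, token, and
+    /// container name are placeholders:
+    ///
+    /// ```ignore
+    /// let client = RemoteServiceClient::new(
+    ///     "https://10.100.0.2:3100".to_string(),
+    ///     "agent-token".to_string(),
+    ///     "worker-1".to_string(),
+    /// )?;
+    /// let status = client.service_status("postgres-main").await?;
+    /// println!("{} running: {}", status.container_name, status.running);
+    /// ```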
+ pub async fn service_status( + &self, + container_name: &str, + ) -> Result { + self.agent_get(&format!("/agent/services/{}/status", container_name)) + .await + } + + // ----------------------------------------------------------------------- + // HTTP helpers + // ----------------------------------------------------------------------- + + async fn agent_get Deserialize<'de>>( + &self, + path: &str, + ) -> Result { + let url = format!("{}{}", self.agent_url, path); + let response = self + .client + .get(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Failed to reach agent on node {} at {}: {}", + self.node_name, url, e + ), + })?; + + let status = response.status(); + let body: AgentResponse = + response + .json() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Invalid response from node {} at {}: {}", + self.node_name, url, e + ), + })?; + + if !body.success { + let err_msg = body.error.unwrap_or_default(); + error!( + "Agent on node {} returned error ({}) at {}: {}", + self.node_name, status, url, err_msg + ); + return Err(ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned error ({}): {}", + self.node_name, status, err_msg + ), + }); + } + + body.data + .ok_or_else(|| ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned success but no data at {}", + self.node_name, url + ), + }) + } + + async fn agent_post Deserialize<'de>>( + &self, + path: &str, + body: &B, + ) -> Result { + let url = format!("{}{}", self.agent_url, path); + let response = self + .client + .post(&url) + .bearer_auth(&self.token) + .json(body) + .send() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Failed to reach agent on node {} at {}: {}", + self.node_name, url, e + ), + })?; + + let status = response.status(); + let body: AgentResponse = + response + .json() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Invalid response from node {} at {}: {}", + self.node_name, url, e + ), + })?; + + if !body.success { + let err_msg = body.error.unwrap_or_default(); + error!( + "Agent on node {} returned error ({}) at {}: {}", + self.node_name, status, url, err_msg + ); + return Err(ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned error ({}): {}", + self.node_name, status, err_msg + ), + }); + } + + body.data + .ok_or_else(|| ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned success but no data at {}", + self.node_name, url + ), + }) + } + + async fn agent_post_no_body Deserialize<'de>>( + &self, + path: &str, + ) -> Result { + let url = format!("{}{}", self.agent_url, path); + let response = self + .client + .post(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Failed to reach agent on node {} at {}: {}", + self.node_name, url, e + ), + })?; + + let status = response.status(); + let body: AgentResponse = + response + .json() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Invalid response from node {} at {}: {}", + self.node_name, url, e + ), + })?; + + if !body.success { + let err_msg = body.error.unwrap_or_default(); + return Err(ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned error ({}): {}", + self.node_name, status, err_msg + ), + }); + } + + 
body.data + .ok_or_else(|| ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned success but no data at {}", + self.node_name, url + ), + }) + } + + async fn agent_delete(&self, path: &str) -> Result<(), ExternalServiceError> { + let url = format!("{}{}", self.agent_url, path); + let response = self + .client + .delete(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Failed to reach agent on node {} at {}: {}", + self.node_name, url, e + ), + })?; + + let body: AgentResponse = + response + .json() + .await + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Invalid response from node {} at {}: {}", + self.node_name, url, e + ), + })?; + + if !body.success { + let err_msg = body.error.unwrap_or_default(); + return Err(ExternalServiceError::InternalError { + reason: format!( + "Agent on node {} returned error: {}", + self.node_name, err_msg + ), + }); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_remote_service_create_params_serialization() { + let params = RemoteServiceCreateParams { + name: "postgres-main".to_string(), + service_type: "postgres".to_string(), + image: "gotempsh/postgres-walg:18-bookworm".to_string(), + environment: HashMap::from([ + ("POSTGRES_PASSWORD".to_string(), "secret".to_string()), + ("POSTGRES_DB".to_string(), "mydb".to_string()), + ]), + port_mappings: vec![RemotePortMapping { + host_port: 30001, + container_port: 5432, + }], + volumes: HashMap::from([( + "postgres-main_data".to_string(), + "/var/lib/postgresql".to_string(), + )]), + network: Some("temps".to_string()), + command: None, + }; + + let json = serde_json::to_string(¶ms).unwrap(); + assert!(json.contains("postgres-main")); + assert!(json.contains("30001")); + assert!(!json.contains("command")); // None fields skipped + } +} diff --git a/crates/temps-providers/src/services.rs b/crates/temps-providers/src/services.rs index 6d75dbeb..b0fbb72f 100644 --- a/crates/temps-providers/src/services.rs +++ b/crates/temps-providers/src/services.rs @@ -1,22 +1,28 @@ use crate::externalsvc::{ - mongodb::MongodbService, postgres::PostgresService, redis::RedisService, rustfs::RustfsService, - s3::S3Service, AvailableContainer, ExternalService, ServiceConfig, ServiceType, + mongodb::MongodbService, postgres::PostgresService, postgres_cluster::PostgresClusterService, + redis::RedisService, rustfs::RustfsService, s3::S3Service, AvailableContainer, + ClusterMemberSpec, ExternalService, ServiceConfig, ServiceType, }; use crate::parameter_strategies; +use crate::remote_service_client::{ + RemotePortMapping, RemoteServiceClient, RemoteServiceCreateParams, +}; use crate::types::EnvironmentVariableInfo; use anyhow::Result; use bollard::Docker; use chrono::Utc; use sea_orm::{ - ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter, - QueryOrder, Set, TransactionTrait, + sea_query::Expr, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, + PaginatorTrait, QueryFilter, QueryOrder, Set, TransactionTrait, }; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::sync::Arc; -use temps_entities::{external_service_backups, external_services, project_services, projects}; +use temps_entities::{ + external_service_backups, external_services, nodes, project_services, projects, service_members, +}; use thiserror::Error; -use tracing::{error, info}; +use tracing::{error, info, warn}; // use 
crate::routes::types::external_services::EnvironmentVariableInfo; use temps_core::EncryptionService; // Add these constants at the top of the file proper key management @@ -133,6 +139,29 @@ pub struct CreateExternalServiceRequest { pub service_type: ServiceType, pub version: Option, pub parameters: HashMap, + /// Target node ID for the service. None = local (control plane). + /// For cluster topology, this is ignored (members specify their own node_ids). + pub node_id: Option, + /// Service topology: "standalone" (default, single container) or "cluster" (HA multi-member). + #[serde(default = "default_topology")] + pub topology: String, + /// Cluster member specifications. Required when topology is "cluster". + /// Each member specifies a role, target node, and ordinal. + #[serde(default)] + pub members: Vec, +} + +fn default_topology() -> String { + "standalone".to_string() +} + +/// Request spec for a single cluster member. +#[derive(Debug, Clone, Deserialize)] +pub struct ClusterMemberRequest { + /// Service-type-specific role (e.g., "monitor", "primary", "replica") + pub role: String, + /// Target worker node ID. None = local (control plane). + pub node_id: Option, } #[derive(Debug, Deserialize)] @@ -190,6 +219,29 @@ pub struct ExternalServiceInfo { pub connection_info: Option, pub created_at: String, pub updated_at: String, + /// Node ID where the service runs. None = control plane (local). + /// For cluster topology, this is None (members have their own node_ids). + pub node_id: Option, + /// Service topology: "standalone" or "cluster". + pub topology: String, + /// Cluster members (empty for standalone services). + #[serde(skip_serializing_if = "Vec::is_empty")] + pub members: Vec, + /// Error message from failed initialization (None if no error). + pub error_message: Option, +} + +/// Public info about a cluster member. +#[derive(Debug, Clone, Serialize)] +pub struct ServiceMemberInfo { + pub id: i32, + pub role: String, + pub node_id: Option, + pub container_name: String, + pub hostname: Option, + pub port: Option, + pub status: String, + pub ordinal: i32, } #[derive(Debug, Serialize, Clone)] @@ -225,6 +277,23 @@ impl ExternalServiceManager { } } + /// Determine the local machine's private IP address for inter-node communication. + /// + /// Uses a UDP socket to determine which interface would be used to reach + /// a public address (without actually sending any data). This gives us the + /// correct source IP for the machine's default route. 
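+    ///
+    /// Note: `connect` on a UDP socket only records the peer address and sends
+    /// no packet, so this works without outbound connectivity as long as the
+    /// host has a route toward the probe address (8.8.8.8 here).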
+ fn get_local_private_ip() -> Result { + let socket = std::net::UdpSocket::bind("0.0.0.0:0") + .map_err(|e| format!("Failed to bind UDP socket: {}", e))?; + socket + .connect("8.8.8.8:80") + .map_err(|e| format!("Failed to connect UDP socket: {}", e))?; + let local_addr = socket + .local_addr() + .map_err(|e| format!("Failed to get local address: {}", e))?; + Ok(local_addr.ip().to_string()) + } + pub async fn get_local_address( &self, service: external_services::Model, @@ -272,6 +341,7 @@ impl ExternalServiceManager { match service_type { ServiceType::Mongodb => Box::new(MongodbService::new(name, self.docker.clone())), ServiceType::Postgres => Box::new(PostgresService::new(name, self.docker.clone())), + // Note: PostgresCluster is handled via create_cluster_service_instance, not here ServiceType::Redis => Box::new(RedisService::new(name, self.docker.clone())), // S3 now uses RustFS by default (high-performance S3-compatible storage) ServiceType::S3 => Box::new(RustfsService::new( @@ -305,6 +375,238 @@ impl ExternalServiceManager { } } + // ----------------------------------------------------------------------- + // Remote-node helpers + // ----------------------------------------------------------------------- + + /// Look up a node by ID and return a `RemoteServiceClient` ready to call + /// the agent's service endpoints. + async fn get_remote_client( + &self, + node_id: i32, + ) -> Result { + let node = nodes::Entity::find_by_id(node_id) + .one(self.db.as_ref()) + .await? + .ok_or(ExternalServiceError::InternalError { + reason: format!("Node {} not found", node_id), + })?; + + let token = node + .token_encrypted + .as_deref() + .ok_or(ExternalServiceError::InternalError { + reason: format!( + "Node {} ({}) has no encrypted token — cannot authenticate", + node_id, node.name + ), + }) + .and_then(|encrypted| { + self.encryption_service + .decrypt_string(encrypted) + .map_err(|e| ExternalServiceError::InternalError { + reason: format!( + "Failed to decrypt token for node {} ({}): {}", + node_id, node.name, e + ), + }) + })?; + + RemoteServiceClient::new(node.address.clone(), token, node.name.clone()) + } + + /// Build the `RemoteServiceCreateParams` that the agent needs to create a + /// Docker container for a given service type and parameters. 
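+    ///
+    /// Per-type defaults encoded below (the image is overridable via the
+    /// `docker_image` parameter, the published host port via `port`):
+    /// Postgres → `gotempsh/postgres-walg:18-bookworm` on 5432,
+    /// Redis/KV → `gotempsh/redis-walg:8-bookworm` on 6379,
+    /// MongoDB → `mongo:7` on 27017,
+    /// S3/RustFS/Blob → `ghcr.io/rustfs/rustfs:latest` on 9000 (legacy Minio
+    /// uses `minio/minio:latest`).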
+ fn build_remote_create_params( + &self, + service_name: &str, + service_type: &ServiceType, + parameters: &HashMap, + ) -> Result { + let (image, container_port, env, volume_path, command) = match service_type { + ServiceType::Postgres => { + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "gotempsh/postgres-walg:18-bookworm".to_string()); + let password = parameters.get("password").cloned().unwrap_or_default(); + let database = parameters + .get("database") + .cloned() + .unwrap_or_else(|| "postgres".to_string()); + let username = parameters + .get("username") + .cloned() + .unwrap_or_else(|| "postgres".to_string()); + let max_connections = parameters + .get("max_connections") + .cloned() + .unwrap_or_else(|| "100".to_string()); + + let env = HashMap::from([ + ("POSTGRES_USER".to_string(), username), + ("POSTGRES_PASSWORD".to_string(), password), + ("POSTGRES_DB".to_string(), database), + ("POSTGRES_HOST_AUTH_METHOD".to_string(), "md5".to_string()), + ]); + let cmd = vec![ + "postgres".to_string(), + "-c".to_string(), + format!("max_connections={}", max_connections), + "-c".to_string(), + "wal_level=replica".to_string(), + "-c".to_string(), + "archive_mode=on".to_string(), + "-c".to_string(), + "archive_timeout=60".to_string(), + ]; + ( + image, + 5432u16, + env, + "/var/lib/postgresql".to_string(), + Some(cmd), + ) + } + ServiceType::Redis => { + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "gotempsh/redis-walg:8-bookworm".to_string()); + let password = parameters.get("password").cloned().unwrap_or_default(); + let env = HashMap::new(); + let cmd = if password.is_empty() { + vec!["redis-server".to_string()] + } else { + vec![ + "redis-server".to_string(), + "--requirepass".to_string(), + password, + ] + }; + (image, 6379u16, env, "/data".to_string(), Some(cmd)) + } + ServiceType::Mongodb => { + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "mongo:7".to_string()); + let username = parameters + .get("username") + .cloned() + .unwrap_or_else(|| "admin".to_string()); + let password = parameters.get("password").cloned().unwrap_or_default(); + let database = parameters + .get("database") + .cloned() + .unwrap_or_else(|| "admin".to_string()); + let env = HashMap::from([ + ("MONGO_INITDB_ROOT_USERNAME".to_string(), username), + ("MONGO_INITDB_ROOT_PASSWORD".to_string(), password), + ("MONGO_INITDB_DATABASE".to_string(), database), + ]); + (image, 27017u16, env, "/data/db".to_string(), None) + } + ServiceType::S3 | ServiceType::Rustfs | ServiceType::Blob => { + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "ghcr.io/rustfs/rustfs:latest".to_string()); + let access_key = parameters + .get("access_key") + .cloned() + .unwrap_or_else(|| "minioadmin".to_string()); + let secret_key = parameters.get("secret_key").cloned().unwrap_or_default(); + let env = HashMap::from([ + ("RUSTFS_ROOT_USER".to_string(), access_key), + ("RUSTFS_ROOT_PASSWORD".to_string(), secret_key), + ]); + let cmd = vec![ + "rustfs".to_string(), + "server".to_string(), + "/data".to_string(), + ]; + (image, 9000u16, env, "/data".to_string(), Some(cmd)) + } + ServiceType::Kv => { + // KV is Redis-backed + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "gotempsh/redis-walg:8-bookworm".to_string()); + let password = parameters.get("password").cloned().unwrap_or_default(); + let env = HashMap::new(); + let cmd = if password.is_empty() { + vec!["redis-server".to_string()] + } 
else { + vec![ + "redis-server".to_string(), + "--requirepass".to_string(), + password, + ] + }; + (image, 6379u16, env, "/data".to_string(), Some(cmd)) + } + #[allow(deprecated)] + ServiceType::Minio => { + let image = parameters + .get("docker_image") + .cloned() + .unwrap_or_else(|| "minio/minio:latest".to_string()); + let access_key = parameters + .get("access_key") + .cloned() + .unwrap_or_else(|| "minioadmin".to_string()); + let secret_key = parameters.get("secret_key").cloned().unwrap_or_default(); + let env = HashMap::from([ + ("MINIO_ROOT_USER".to_string(), access_key), + ("MINIO_ROOT_PASSWORD".to_string(), secret_key), + ]); + let cmd = vec![ + "minio".to_string(), + "server".to_string(), + "/data".to_string(), + ]; + (image, 9000u16, env, "/data".to_string(), Some(cmd)) + } + }; + + let host_port: u16 = parameters + .get("port") + .and_then(|p| p.parse().ok()) + .unwrap_or(container_port); + + let container_name = self + .create_service_instance(service_name.to_string(), *service_type) + .get_name(); + let container_name_for_volume = format!("{}-{}", service_type, service_name); + let volume_name = format!("{}_data", container_name_for_volume); + + Ok(RemoteServiceCreateParams { + name: container_name, + service_type: service_type.to_string(), + image, + environment: env, + port_mappings: vec![RemotePortMapping { + host_port, + container_port, + }], + volumes: HashMap::from([(volume_name, volume_path)]), + network: Some(temps_core::NETWORK_NAME.to_string()), + command, + }) + } + + /// Get the container name for a service (used for remote operations). + fn get_container_name_for_service( + &self, + service_name: &str, + service_type: &ServiceType, + ) -> String { + self.create_service_instance(service_name.to_string(), *service_type) + .get_name() + } + pub async fn get_service_by_name( &self, name_param: &str, @@ -375,6 +677,9 @@ impl ExternalServiceManager { reason: format!("Failed to encrypt config: {}", e), })?; + let topology = request.topology.clone(); + let topology_for_txn = topology.clone(); + // Start transaction let service = self .db @@ -388,6 +693,8 @@ impl ExternalServiceManager { version: Set(request.version.clone()), status: Set("pending".to_string()), config: Set(Some(encrypted_config)), + node_id: Set(request.node_id), + topology: Set(topology_for_txn), created_at: Set(Utc::now()), updated_at: Set(Utc::now()), ..Default::default() @@ -401,33 +708,96 @@ impl ExternalServiceManager { .await .map_err(ExternalServiceError::from)?; - // Initialize the service - if this fails, delete the service record to maintain consistency - let init_result = self.initialize_service(service.id).await; - if let Err(e) = init_result { - // Initialization failed - clean up the database record - error!( - "Service initialization failed for service {}: {}. Rolling back database record.", - service.id, e - ); + // Initialize the service + if topology == "cluster" { + // Cluster creation is async — update status to "creating" and spawn background task. + // The frontend polls GET /external-services/{id} to track progress. 
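+            // Status lifecycle: "creating" → "running" on success (set inside
+            // initialize_cluster), or "failed" with error_message populated on
+            // error; a failed cluster can then be re-attempted via
+            // POST /external-services/{id}/retry.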
+ let mut service_update: external_services::ActiveModel = service.clone().into(); + service_update.status = Set("creating".to_string()); + service_update.update(self.db.as_ref()).await?; + + let db = self.db.clone(); + let docker = self.docker.clone(); + let encryption_service = self.encryption_service.clone(); + let service_id = service.id; + let members = request.members.clone(); + + tokio::spawn(async move { + let manager = ExternalServiceManager::new(db.clone(), encryption_service, docker); + let result = manager.initialize_cluster(service_id, &members).await; + + match result { + Ok(()) => { + info!( + "Cluster service {} initialized successfully (background)", + service_id + ); + // Status already set to "running" inside initialize_cluster + } + Err(e) => { + error!( + "Background cluster creation failed for service {}: {}", + service_id, e + ); + + // Update service status to "failed" with error message + let update_result: Result<_, sea_orm::DbErr> = async { + let mut svc: external_services::ActiveModel = + external_services::Entity::find_by_id(service_id) + .one(db.as_ref()) + .await? + .ok_or(sea_orm::DbErr::RecordNotFound( + "Service not found during rollback".to_string(), + ))? + .into(); + svc.status = Set("failed".to_string()); + svc.error_message = Set(Some(e.to_string())); + svc.updated_at = Set(Utc::now()); + svc.update(db.as_ref()).await?; + Ok(()) + } + .await; - // Delete the service record - if let Err(delete_err) = external_services::Entity::delete_by_id(service.id) - .exec(self.db.as_ref()) - .await - { + if let Err(db_err) = update_result { + error!( + "Failed to update service {} status to 'failed': {}", + service_id, db_err + ); + } + } + } + }); + + // Return immediately with "creating" status + self.get_service_info(service.id).await + } else { + // Standalone: initialize synchronously + let init_result = self.initialize_service(service.id).await; + + if let Err(e) = init_result { error!( - "Failed to clean up service {} after initialization failure: {}", - service.id, delete_err + "Service initialization failed for service {}: {}. 
Rolling back database record.", + service.id, e ); + + if let Err(delete_err) = external_services::Entity::delete_by_id(service.id) + .exec(self.db.as_ref()) + .await + { + error!( + "Failed to clean up service {} after initialization failure: {}", + service.id, delete_err + ); + } + + return Err(ExternalServiceError::InitializationFailed { + id: service.id, + reason: e.to_string(), + }); } - return Err(ExternalServiceError::InitializationFailed { - id: service.id, - reason: e.to_string(), - }); + self.get_service_info(service.id).await } - - self.get_service_info(service.id).await } pub async fn get_service_config( @@ -689,7 +1059,7 @@ impl ExternalServiceManager { let service_type_enum = ServiceType::from_str(&service.service_type).map_err(|_| { ExternalServiceError::InvalidServiceType { id: service_id, - service_type: service.service_type, + service_type: service.service_type.clone(), } })?; @@ -706,26 +1076,32 @@ impl ExternalServiceManager { }); } - let service_instance = - self.create_service_instance(service.name.clone(), service_type_enum); + // Load cluster members BEFORE deleting DB records (needed for container cleanup) + let members = service_members::Entity::find() + .filter(service_members::Column::ServiceId.eq(service_id)) + .all(self.db.as_ref()) + .await?; + let is_cluster = !members.is_empty(); - // Delete from database + // Delete from database first self.db .transaction::<_, (), ExternalServiceError>(|txn| { Box::pin(async move { - // Delete project links (should be empty due to check above) project_services::Entity::delete_many() .filter(project_services::Column::ServiceId.eq(service_id)) .exec(txn) .await?; - // Delete service backups external_service_backups::Entity::delete_many() .filter(external_service_backups::Column::ServiceId.eq(service_id)) .exec(txn) .await?; - // Delete service + service_members::Entity::delete_many() + .filter(service_members::Column::ServiceId.eq(service_id)) + .exec(txn) + .await?; + external_services::Entity::delete_by_id(service_id) .exec(txn) .await?; @@ -736,15 +1112,108 @@ impl ExternalServiceManager { .await .map_err(ExternalServiceError::from)?; - // Stop the service - info!("Stopping service {} before deletion", service_id); - service_instance - .remove() - .await - .map_err(|e| ExternalServiceError::DeletionFailed { - id: service_id, - reason: e.to_string(), - })?; + // Remove containers + if is_cluster { + // Cluster: remove each member container (best-effort, log failures) + info!( + "Removing {} cluster member container(s) for service {}", + members.len(), + service_id + ); + let mut errors = Vec::new(); + + for member in &members { + if let Some(node_id) = member.node_id { + match self.get_remote_client(node_id).await { + Ok(client) => { + if let Err(e) = client.remove_service(&member.container_name).await { + let msg = format!( + "Failed to remove remote container '{}' on node {}: {}", + member.container_name, node_id, e + ); + error!("{}", msg); + errors.push(msg); + } + } + Err(e) => { + let msg = format!( + "Failed to connect to node {} to remove '{}': {}", + node_id, member.container_name, e + ); + error!("{}", msg); + errors.push(msg); + } + } + } else { + // Local container + if let Err(e) = self + .docker + .remove_container( + &member.container_name, + Some(bollard::query_parameters::RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await + { + let msg = format!( + "Failed to remove local container '{}': {}", + member.container_name, e + ); + error!("{}", msg); + errors.push(msg); + } + + // 
Also remove the volume + let volume_name = format!("{}_data", member.container_name); + if let Err(e) = self + .docker + .remove_volume( + &volume_name, + None::, + ) + .await + { + warn!("Failed to remove volume '{}': {}", volume_name, e); + } + } + } + + if !errors.is_empty() { + return Err(ExternalServiceError::DeletionFailed { + id: service_id, + reason: format!( + "Service deleted from database but {} container(s) failed to remove: {}", + errors.len(), + errors.join("; ") + ), + }); + } + } else { + // Standalone: remove single container + info!("Removing service {} container", service_id); + if let Some(node_id) = service.node_id { + let client = self.get_remote_client(node_id).await?; + let container_name = + self.get_container_name_for_service(&service.name, &service_type_enum); + client.remove_service(&container_name).await.map_err(|e| { + ExternalServiceError::DeletionFailed { + id: service_id, + reason: e.to_string(), + } + })?; + } else { + let service_instance = + self.create_service_instance(service.name.clone(), service_type_enum); + service_instance.remove().await.map_err(|e| { + ExternalServiceError::DeletionFailed { + id: service_id, + reason: e.to_string(), + } + })?; + } + } Ok(()) } @@ -772,6 +1241,13 @@ impl ExternalServiceManager { ) -> Result { let service = self.get_service(service_id).await?; + // Load cluster members if this is a cluster topology + let members = if service.topology == "cluster" { + self.get_service_members(service_id).await? + } else { + Vec::new() + }; + Ok(ExternalServiceInfo { id: service.id, name: service.name, @@ -786,9 +1262,79 @@ impl ExternalServiceManager { connection_info: None, created_at: service.created_at.to_rfc3339(), updated_at: service.updated_at.to_rfc3339(), + node_id: service.node_id, + topology: service.topology, + members, + error_message: service.error_message, }) } + /// Get all members for a cluster service. + pub async fn get_service_members( + &self, + service_id: i32, + ) -> Result, ExternalServiceError> { + let members = service_members::Entity::find() + .filter(service_members::Column::ServiceId.eq(service_id)) + .order_by_asc(service_members::Column::Ordinal) + .all(self.db.as_ref()) + .await?; + + Ok(members + .into_iter() + .map(|m| ServiceMemberInfo { + id: m.id, + role: m.role, + node_id: m.node_id, + container_name: m.container_name, + hostname: m.hostname, + port: m.port, + status: m.status, + ordinal: m.ordinal, + }) + .collect()) + } + + /// Get the primary data node's connection address for a cluster service. + /// + /// Returns `Some((host, port))` if the service is a cluster with a running primary. + /// Returns `None` if the service is standalone (not a cluster). + /// + /// For local clusters, `host` is the container name (Docker DNS). + /// For remote clusters, `host` is the member's hostname (private/WireGuard IP). 
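+    ///
+    /// Errors if the service is a cluster but no primary member is currently
+    /// in the "running" state.
+    ///
+    /// Usage sketch, assuming `manager` is an `ExternalServiceManager` and
+    /// `service_id` is illustrative:
+    ///
+    /// ```ignore
+    /// match manager.get_cluster_primary_address(service_id).await? {
+    ///     Some((host, port)) => println!("primary at {}:{}", host, port),
+    ///     None => println!("standalone service, use its stored config"),
+    /// }
+    /// ```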
+ pub async fn get_cluster_primary_address( + &self, + service_id: i32, + ) -> Result, ExternalServiceError> { + let service = self.get_service(service_id).await?; + if service.topology != "cluster" { + return Ok(None); + } + + let members = self.get_service_members(service_id).await?; + + // Find the primary data node (not monitor, not replica) + let primary = members + .iter() + .find(|m| m.role == "primary" && m.status == "running"); + + if let Some(primary) = primary { + let host = primary + .hostname + .clone() + .unwrap_or_else(|| primary.container_name.clone()); + let port = primary.port.unwrap_or(5432) as u16; + Ok(Some((host, port))) + } else { + Err(ExternalServiceError::InternalError { + reason: format!( + "Cluster service {} has no running primary data node", + service_id + ), + }) + } + } + async fn get_service_parameters( &self, service_id_val: i32, @@ -837,6 +1383,20 @@ impl ExternalServiceManager { } })?; + // Remote node — delegate to agent + if let Some(node_id) = service.node_id { + return self + .initialize_service_remote( + service_id, + node_id, + &service, + ¶meters, + &service_type_enum, + ) + .await; + } + + // Local node — use existing Docker-based service logic let service_instance = self.create_service_instance(service.name.clone(), service_type_enum); @@ -893,6 +1453,873 @@ impl ExternalServiceManager { Ok(()) } + /// Initialize a service on a remote node via the agent API. + async fn initialize_service_remote( + &self, + service_id: i32, + node_id: i32, + service: &external_services::Model, + parameters: &HashMap, + service_type: &ServiceType, + ) -> Result<(), ExternalServiceError> { + info!( + "Initializing service {} on remote node {}", + service_id, node_id + ); + let client = self.get_remote_client(node_id).await?; + + // Flatten serde_json::Value parameters to strings for the builder + let string_params: HashMap = parameters + .iter() + .map(|(k, v)| { + let s = match v { + serde_json::Value::String(s) => s.clone(), + other => other.to_string(), + }; + (k.clone(), s) + }) + .collect(); + + let create_params = + self.build_remote_create_params(&service.name, service_type, &string_params)?; + + // Try to stop existing container first (ignore errors — may not exist) + let container_name = create_params.name.clone(); + if let Err(e) = client.stop_service(&container_name).await { + info!( + "Could not stop remote container {} (may not exist): {}", + container_name, e + ); + } + + // Create the container on the remote node + let response = client.create_service(create_params).await.map_err(|e| { + ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!("Remote agent create_service failed: {}", e), + } + })?; + + info!( + "Service {} created on node {} — container {} (port {})", + service_id, node_id, response.container_name, response.host_port + ); + + // Store the host_port as an inferred parameter so env-var generation works + let mut inferred = HashMap::new(); + inferred.insert("port".to_string(), response.host_port.to_string()); + inferred.insert("container_id".to_string(), response.container_id.clone()); + + // Persist inferred parameters + let mut current_params = self.get_service_parameters(service_id).await?; + for (key, value) in inferred { + if Self::is_inferred_parameter(&key) || !current_params.contains_key(&key) { + current_params.insert(key, serde_json::Value::String(value)); + } + } + let config_json = serde_json::to_string(¤t_params).map_err(|e| { + ExternalServiceError::InternalError { + reason: format!("Failed to 
serialize updated params: {}", e), + } + })?; + let encrypted_config = self + .encryption_service + .encrypt_string(&config_json) + .map_err(|e| ExternalServiceError::InternalError { + reason: format!("Failed to encrypt updated params: {}", e), + })?; + + let mut service_update: external_services::ActiveModel = service.clone().into(); + service_update.status = Set("running".to_string()); + service_update.config = Set(Some(encrypted_config)); + service_update.updated_at = Set(Utc::now()); + service_update.update(self.db.as_ref()).await?; + + Ok(()) + } + + // ----------------------------------------------------------------------- + // Cluster initialization + // ----------------------------------------------------------------------- + + /// Create a cluster-aware service instance for the given service type. + fn create_cluster_service_instance( + &self, + name: String, + service_type: ServiceType, + ) -> Option> { + match service_type { + ServiceType::Postgres => Some(Box::new(PostgresClusterService::new( + name, + self.docker.clone(), + ))), + // Future: Redis Sentinel, MongoDB Replica Set, RustFS distributed + _ => None, + } + } + + /// Initialize a cluster service: create member containers across nodes, + /// then record them in the service_members table. + async fn initialize_cluster( + &self, + service_id: i32, + member_requests: &[ClusterMemberRequest], + ) -> Result<(), ExternalServiceError> { + info!("Initializing cluster for service {}", service_id); + let service = self.get_service(service_id).await?; + let parameters = self.get_service_parameters(service_id).await?; + let service_type = ServiceType::from_str(&service.service_type).map_err(|_| { + ExternalServiceError::InvalidServiceType { + id: service_id, + service_type: service.service_type.clone(), + } + })?; + + let cluster_instance = self + .create_cluster_service_instance(service.name.clone(), service_type) + .ok_or_else(|| ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!( + "Service type '{}' does not support cluster topology", + service.service_type + ), + })?; + + // Validate roles + let valid_roles = cluster_instance.valid_cluster_roles(); + for (i, member) in member_requests.iter().enumerate() { + if !valid_roles.contains(&member.role.as_str()) { + return Err(ExternalServiceError::ParameterValidationFailed { + service_id, + reason: format!( + "Invalid role '{}' for member {}. Valid roles: {:?}", + member.role, i, valid_roles + ), + }); + } + } + + // Build member specs with ordinals and hostnames. + // + // When the cluster spans multiple nodes (has any remote members), + // local members must advertise a routable IP instead of a Docker + // container name — remote workers cannot resolve container names + // from another host's Docker network. + let has_remote_members = member_requests.iter().any(|m| m.node_id.is_some()); + let local_private_ip: Option = if has_remote_members { + Some(Self::get_local_private_ip().map_err(|e| { + ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!( + "Cluster has remote members but could not determine local private IP: {}", + e + ), + } + })?) + } else { + None + }; + + let mut member_specs = Vec::new(); + for (i, member) in member_requests.iter().enumerate() { + let hostname: Option = if let Some(node_id) = member.node_id { + // Look up the node's private address for inter-member communication + let node = nodes::Entity::find_by_id(node_id) + .one(self.db.as_ref()) + .await? 
+ .ok_or(ExternalServiceError::InternalError { + reason: format!("Node {} not found", node_id), + })?; + Some(node.private_address.clone()) + } else { + // Local member: use control plane's private IP if available + // (so remote workers can reach it), otherwise None (Docker DNS) + local_private_ip.clone() + }; + + member_specs.push(ClusterMemberSpec { + role: member.role.clone(), + node_id: member.node_id, + ordinal: i as i32, + hostname, + }); + } + + // Get the cluster config for building member-specific params + let service_config = ServiceConfig { + name: service.name.clone(), + service_type, + version: service.version.clone(), + parameters: serde_json::to_value(¶meters).map_err(|e| { + ExternalServiceError::InternalError { + reason: format!("Failed to serialize parameters: {}", e), + } + })?, + }; + + // Call init_cluster to get the container specs (names, ports) + let member_results = cluster_instance + .init_cluster(service_config.clone(), member_specs.clone()) + .await + .map_err(|e| ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!("Cluster init_cluster failed: {}", e), + })?; + + // Get the Postgres cluster service for building member params + let pg_cluster = match service_type { + ServiceType::Postgres => Some(PostgresClusterService::new( + service.name.clone(), + self.docker.clone(), + )), + _ => None, + }; + + let cluster_config_parsed: crate::externalsvc::postgres_cluster::PostgresClusterConfig = + serde_json::from_value(service_config.parameters.clone()).map_err(|e| { + ExternalServiceError::InternalError { + reason: format!("Failed to parse cluster config: {}", e), + } + })?; + + // Find the monitor hostname for data node configuration. + // For remote workers, use the node's private/WireGuard address. + // For local (no node_id), use the monitor container name so Docker DNS resolves it. + let monitor_spec = member_specs.iter().find(|m| m.role == "monitor"); + let pg_cluster_name = service.name.clone(); + let monitor_container_fallback = format!("postgres-{}-monitor", pg_cluster_name); + let monitor_hostname = monitor_spec + .and_then(|m| m.hostname.as_deref()) + .unwrap_or(&monitor_container_fallback); + + // Assign unique host ports for each cluster member to avoid conflicts + // with other services (e.g., the platform's own TimescaleDB on 5432). + // Base port is derived from service_id to keep ports stable across restarts. + // Range: 6000 + (service_id * 10) + ordinal, giving 10 ports per cluster. + let base_port = 6000u16 + (service_id as u16 * 10); + // Monitor gets base_port, data nodes get base_port + 1, +2, etc. 
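+ // (Worked example of the scheme above, for illustration: service_id = 3 gives base_port = 6030, so the monitor listens on 6030 and data nodes at ordinals 1 and 2 on 6031 and 6032, keeping the whole cluster inside 6030–6039.)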
+ let monitor_port = base_port; + info!( + "Cluster '{}' port assignment: monitor={}, data nodes start at {}", + pg_cluster_name, + monitor_port, + base_port + 1 + ); + + // Track successfully created members for rollback on failure + struct CreatedMember { + container_name: String, + node_id: Option, + } + let mut created_members: Vec = Vec::new(); + + // Create each member container (in order: monitor first, then data nodes) + let create_result: Result<(), ExternalServiceError> = async { + for (result, spec) in member_results.iter().zip(member_specs.iter()) { + info!( + "Creating cluster member: {} (role: {}, ordinal: {}, node: {:?})", + result.container_name, result.role, result.ordinal, spec.node_id + ); + + // Insert member record with "creating" status so frontend can track progress + let member_record = service_members::ActiveModel { + service_id: Set(service_id), + node_id: Set(spec.node_id), + role: Set(result.role.clone()), + container_id: Set(None), + container_name: Set(result.container_name.clone()), + hostname: Set(spec.hostname.clone()), + port: Set(None), + status: Set("creating".to_string()), + ordinal: Set(result.ordinal), + config: Set(None), + created_at: Set(Utc::now()), + updated_at: Set(Utc::now()), + ..Default::default() + }; + let member_model = member_record.insert(self.db.as_ref()).await?; + + // Assign port: monitor gets base_port, data nodes get base + ordinal + let member_port = if spec.role == "monitor" { + monitor_port + } else { + base_port + spec.ordinal as u16 + }; + + let (container_id, host_port) = if let Some(node_id) = spec.node_id { + // Remote: dispatch to agent + let client = self.get_remote_client(node_id).await?; + + // Build member-specific create params + let member_params = if let Some(ref pg) = pg_cluster { + pg.build_member_params( + spec, + &cluster_config_parsed, + monitor_hostname, + monitor_port, + member_port, + ) + } else { + return Err(ExternalServiceError::InitializationFailed { + id: service_id, + reason: "Only Postgres clusters are currently supported".to_string(), + }); + }; + + // Each cluster member uses a unique port assigned by the + // manager. Map container_port = host_port to avoid conflicts. 
+ let volume_name = format!("{}_data", result.container_name); + let remote_params = RemoteServiceCreateParams { + name: result.container_name.clone(), + service_type: "postgres".to_string(), + image: member_params.image, + environment: member_params.environment, + port_mappings: vec![RemotePortMapping { + host_port: member_params.container_port, + container_port: member_params.container_port, + }], + volumes: HashMap::from([(volume_name, member_params.volume_path)]), + network: Some(temps_core::NETWORK_NAME.to_string()), + command: member_params.command, + }; + + let response = client.create_service(remote_params).await.map_err(|e| { + ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!( + "Failed to create cluster member '{}' on node {}: {}", + result.container_name, node_id, e + ), + } + })?; + + (response.container_id, Some(response.host_port as i32)) + } else { + // Local: create container directly via Docker + // For now, use the agent-style approach via local Docker + let member_params = if let Some(ref pg) = pg_cluster { + pg.build_member_params( + spec, + &cluster_config_parsed, + monitor_hostname, + monitor_port, + member_port, + ) + } else { + return Err(ExternalServiceError::InitializationFailed { + id: service_id, + reason: "Only Postgres clusters are currently supported".to_string(), + }); + }; + + // Pull image, create and start container locally + self.create_local_cluster_member(&result.container_name, &member_params) + .await + .map_err(|e| ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!( + "Failed to create local cluster member '{}': {}", + result.container_name, e + ), + })? + }; + + // Track this member for potential rollback + created_members.push(CreatedMember { + container_name: result.container_name.clone(), + node_id: spec.node_id, + }); + + // Wait for the member to be healthy before proceeding to the next + // This is important: monitor must be healthy before data nodes register + if spec.role == "monitor" { + info!( + "Waiting for monitor '{}' to become healthy...", + result.container_name + ); + self.wait_for_container_health(&result.container_name, 60) + .await + .map_err(|e| ExternalServiceError::InitializationFailed { + id: service_id, + reason: format!("Monitor failed health check: {}", e), + })?; + } + + // Update member record with container info and "running" status + let mut member_update: service_members::ActiveModel = member_model.into(); + member_update.container_id = Set(Some(container_id)); + member_update.port = Set(host_port); + member_update.status = Set("running".to_string()); + member_update.updated_at = Set(Utc::now()); + member_update.update(self.db.as_ref()).await?; + } + Ok(()) + } + .await; + + // If any member failed, roll back all previously created containers + if let Err(e) = create_result { + error!( + "Cluster member creation failed for service {}: {}. 
Rolling back {} created container(s).", + service_id, e, created_members.len() + ); + + for member in &created_members { + if let Some(node_id) = member.node_id { + // Remote: ask agent to remove the container + match self.get_remote_client(node_id).await { + Ok(client) => { + if let Err(rm_err) = client.remove_service(&member.container_name).await + { + error!( + "Rollback: failed to remove remote container '{}' on node {}: {}", + member.container_name, node_id, rm_err + ); + } else { + info!( + "Rollback: removed remote container '{}' on node {}", + member.container_name, node_id + ); + } + } + Err(client_err) => { + error!( + "Rollback: failed to get remote client for node {}: {}", + node_id, client_err + ); + } + } + } else { + // Local: remove container directly via Docker + if let Err(rm_err) = self + .docker + .remove_container( + &member.container_name, + Some(bollard::query_parameters::RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await + { + error!( + "Rollback: failed to remove local container '{}': {}", + member.container_name, rm_err + ); + } else { + info!( + "Rollback: removed local container '{}'", + member.container_name + ); + } + + // Also remove the volume + let volume_name = format!("{}_data", member.container_name); + if let Err(vol_err) = self + .docker + .remove_volume( + &volume_name, + None::, + ) + .await + { + warn!( + "Rollback: failed to remove volume '{}': {}", + volume_name, vol_err + ); + } + } + } + + // Mark remaining service_members as "failed" instead of deleting them. + // This preserves the original member topology so the retry endpoint can + // reconstruct the member specs without user re-input. + if let Err(db_err) = service_members::Entity::update_many() + .col_expr(service_members::Column::Status, Expr::value("failed")) + .col_expr(service_members::Column::UpdatedAt, Expr::value(Utc::now())) + .filter(service_members::Column::ServiceId.eq(service_id)) + .exec(self.db.as_ref()) + .await + { + error!( + "Rollback: failed to update service_members status for service {}: {}", + service_id, db_err + ); + } + + return Err(e); + } + + // Update parent service status + let mut service_update: external_services::ActiveModel = service.into(); + service_update.status = Set("running".to_string()); + service_update.updated_at = Set(Utc::now()); + service_update.update(self.db.as_ref()).await?; + + info!("Cluster service {} initialized successfully", service_id); + Ok(()) + } + + /// Retry a failed cluster service initialization. + /// + /// Cleans up any leftover containers and service_members from the previous + /// attempt, then re-runs `initialize_cluster`. + /// + /// If `member_requests` is empty, the original member configuration is + /// reconstructed from the preserved `service_members` records (which are + /// now kept with "failed" status instead of being deleted on rollback). 
+ pub async fn retry_cluster( + &self, + service_id: i32, + member_requests: &[ClusterMemberRequest], + ) -> Result { + let service = self.get_service(service_id).await?; + + if service.topology != "cluster" { + return Err(ExternalServiceError::ParameterValidationFailed { + service_id, + reason: "retry_cluster is only valid for cluster topology services".to_string(), + }); + } + + if service.status != "failed" && service.status != "creating" { + return Err(ExternalServiceError::ParameterValidationFailed { + service_id, + reason: format!( + "Service must be in 'failed' or 'creating' status to retry, current status: '{}'", + service.status + ), + }); + } + + info!( + "Retrying cluster initialization for service {} (current status: {})", + service_id, service.status + ); + + // Clean up any leftover service_members and their containers + let leftover_members = service_members::Entity::find() + .filter(service_members::Column::ServiceId.eq(service_id)) + .order_by_asc(service_members::Column::Ordinal) + .all(self.db.as_ref()) + .await?; + + // Reconstruct member specs from preserved records if none were provided + let effective_members: Vec = if member_requests.is_empty() { + if leftover_members.is_empty() { + return Err(ExternalServiceError::ParameterValidationFailed { + service_id, + reason: + "No member configuration provided and no previous member records found. \ + Please provide the members array in the retry request." + .to_string(), + }); + } + info!( + "Reconstructing member config from {} preserved records for service {}", + leftover_members.len(), + service_id + ); + leftover_members + .iter() + .map(|m| ClusterMemberRequest { + role: m.role.clone(), + node_id: m.node_id, + }) + .collect() + } else { + member_requests.to_vec() + }; + + for member in &leftover_members { + // Try to remove the container (ignore errors — it may not exist) + if let Some(node_id) = member.node_id { + if let Ok(client) = self.get_remote_client(node_id).await { + if let Err(e) = client.remove_service(&member.container_name).await { + warn!( + "Retry cleanup: failed to remove remote container '{}' on node {}: {}", + member.container_name, node_id, e + ); + } + } + } else { + let _ = self + .docker + .remove_container( + &member.container_name, + Some(bollard::query_parameters::RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await; + + // Also remove the volume + let volume_name = format!("{}_data", member.container_name); + let _ = self + .docker + .remove_volume( + &volume_name, + None::, + ) + .await; + } + } + + // Delete leftover member records + if !leftover_members.is_empty() { + service_members::Entity::delete_many() + .filter(service_members::Column::ServiceId.eq(service_id)) + .exec(self.db.as_ref()) + .await?; + info!( + "Retry cleanup: removed {} leftover member records for service {}", + leftover_members.len(), + service_id + ); + } + + // Update status to "creating" and clear previous error + let mut service_update: external_services::ActiveModel = service.into(); + service_update.status = Set("creating".to_string()); + service_update.error_message = Set(None); + service_update.updated_at = Set(Utc::now()); + service_update.update(self.db.as_ref()).await?; + + // Spawn background task to re-initialize (same pattern as create) + let db = self.db.clone(); + let docker = self.docker.clone(); + let encryption_service = self.encryption_service.clone(); + let members = effective_members; + + tokio::spawn(async move { + let manager = ExternalServiceManager::new(db.clone(), 
encryption_service, docker); + let result = manager.initialize_cluster(service_id, &members).await; + + match result { + Ok(()) => { + info!( + "Cluster service {} retry succeeded (background)", + service_id + ); + } + Err(e) => { + error!( + "Cluster service {} retry failed (background): {}", + service_id, e + ); + + let update_result: Result<_, sea_orm::DbErr> = async { + let mut svc: external_services::ActiveModel = + external_services::Entity::find_by_id(service_id) + .one(db.as_ref()) + .await? + .ok_or(sea_orm::DbErr::RecordNotFound( + "Service not found during retry rollback".to_string(), + ))? + .into(); + svc.status = Set("failed".to_string()); + svc.error_message = Set(Some(e.to_string())); + svc.updated_at = Set(Utc::now()); + svc.update(db.as_ref()).await?; + Ok(()) + } + .await; + + if let Err(db_err) = update_result { + error!( + "Failed to update service {} status to 'failed' after retry: {}", + service_id, db_err + ); + } + } + } + }); + + self.get_service_info(service_id).await + } + + /// Create a cluster member container on the local Docker daemon. + async fn create_local_cluster_member( + &self, + container_name: &str, + params: &crate::externalsvc::postgres_cluster::ClusterMemberCreateParams, + ) -> Result<(String, Option), ExternalServiceError> { + use bollard::models::*; + use bollard::query_parameters::*; + use futures::TryStreamExt; + + // Ensure network exists + crate::utils::ensure_network_exists(&self.docker) + .await + .map_err(|e| ExternalServiceError::DockerError { + id: 0, + reason: format!("Failed to ensure network: {}", e), + })?; + + // Pull image + self.docker + .create_image( + Some(CreateImageOptions { + from_image: Some(params.image.clone()), + ..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await + .map_err(|e| ExternalServiceError::DockerError { + id: 0, + reason: format!("Failed to pull image {}: {}", params.image, e), + })?; + + // Create volume + let volume_name = format!("{}_data", container_name); + let _ = self + .docker + .create_volume(bollard::models::VolumeCreateRequest { + name: Some(volume_name.clone()), + ..Default::default() + }) + .await; + + // Build env vars + let env: Vec = params + .environment + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect(); + + // Port bindings: map the container port to the same host port. + // Each cluster member uses a unique port assigned by the manager so + // there are no conflicts even when multiple members run on the same host. 
+ let mut port_bindings = std::collections::HashMap::new(); + let container_port_key = format!("{}/tcp", params.container_port); + port_bindings.insert( + container_port_key.clone(), + Some(vec![PortBinding { + host_ip: Some("0.0.0.0".to_string()), + host_port: Some(params.container_port.to_string()), + }]), + ); + + // Create container + let container_config = ContainerCreateBody { + image: Some(params.image.clone()), + env: Some(env), + cmd: params.command.clone(), + host_config: Some(HostConfig { + binds: Some(vec![format!("{}:{}", volume_name, params.volume_path)]), + port_bindings: Some(port_bindings), + restart_policy: Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + maximum_retry_count: None, + }), + network_mode: Some(temps_core::NETWORK_NAME.to_string()), + ..Default::default() + }), + labels: Some(HashMap::from([ + ("sh.temps.managed".to_string(), "true".to_string()), + ("sh.temps.service".to_string(), "true".to_string()), + ( + "sh.temps.service.type".to_string(), + "postgres-cluster".to_string(), + ), + ( + "sh.temps.service.name".to_string(), + container_name.to_string(), + ), + ])), + ..Default::default() + }; + + let response = self + .docker + .create_container( + Some( + CreateContainerOptionsBuilder::new() + .name(container_name) + .build(), + ), + container_config, + ) + .await + .map_err(|e| ExternalServiceError::DockerError { + id: 0, + reason: format!("Failed to create container {}: {}", container_name, e), + })?; + + // Start container + self.docker + .start_container(container_name, None::) + .await + .map_err(|e| ExternalServiceError::DockerError { + id: 0, + reason: format!("Failed to start container {}: {}", container_name, e), + })?; + + // Each member uses a unique port — container_port == host_port + let host_port = Some(params.container_port as i32); + + Ok((response.id, host_port)) + } + + /// Wait for a container to become healthy (Docker health check). 
+ async fn wait_for_container_health( + &self, + container_name: &str, + timeout_secs: u64, + ) -> Result<(), ExternalServiceError> { + use bollard::query_parameters::InspectContainerOptions; + use std::time::{Duration, Instant}; + + let start = Instant::now(); + let timeout = Duration::from_secs(timeout_secs); + + loop { + if start.elapsed() > timeout { + return Err(ExternalServiceError::InitializationFailed { + id: 0, + reason: format!( + "Container {} did not become healthy within {}s", + container_name, timeout_secs + ), + }); + } + + if let Ok(info) = self + .docker + .inspect_container(container_name, None::) + .await + { + let running = info.state.as_ref().and_then(|s| s.running).unwrap_or(false); + + if running { + // Check if container has a healthcheck and if it's healthy + let health_status = info + .state + .as_ref() + .and_then(|s| s.health.as_ref()) + .and_then(|h| h.status.as_ref()) + .map(|s| format!("{:?}", s)); + + match health_status.as_deref() { + Some("\"HEALTHY\"") | Some("Healthy") => return Ok(()), + None => { + // No healthcheck defined — just check if running + return Ok(()); + } + _ => {} // Still starting or unhealthy — keep waiting + } + } + } + // Container not found or not running yet — keep waiting + + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + } + } + async fn store_inferred_parameters( &self, service_id: i32, @@ -994,30 +2421,54 @@ impl ExternalServiceManager { } })?; - let service_instance = - self.create_service_instance(service.name.clone(), service_type_enum); + // Remote node — delegate to agent + if let Some(node_id) = service.node_id { + let client = self.get_remote_client(node_id).await?; + let container_name = + self.get_container_name_for_service(&service.name, &service_type_enum); - // Try to start the existing container first - match service_instance.start().await { - Ok(()) => {} - Err(e) => { - // If start failed (e.g., container was removed and in-memory config is empty), - // fall back to full initialization which loads config from DB and recreates - // the container. - info!( - "Direct start failed for service {} ({}), falling back to initialize: {}", - service_id, service.name, e - ); - self.initialize_service(service_id) - .await - .map_err(|init_err| ExternalServiceError::StartFailed { - id: service_id, - reason: format!( - "Start failed: {}. Re-initialize also failed: {}", - e, init_err - ), - })?; - return self.get_service_info(service_id).await; + match client.start_service(&container_name).await { + Ok(()) => {} + Err(e) => { + info!( + "Remote start failed for service {} ({}), falling back to initialize: {}", + service_id, service.name, e + ); + self.initialize_service(service_id) + .await + .map_err(|init_err| ExternalServiceError::StartFailed { + id: service_id, + reason: format!( + "Start failed: {}. Re-initialize also failed: {}", + e, init_err + ), + })?; + return self.get_service_info(service_id).await; + } + } + } else { + // Local node + let service_instance = + self.create_service_instance(service.name.clone(), service_type_enum); + + match service_instance.start().await { + Ok(()) => {} + Err(e) => { + info!( + "Direct start failed for service {} ({}), falling back to initialize: {}", + service_id, service.name, e + ); + self.initialize_service(service_id) + .await + .map_err(|init_err| ExternalServiceError::StartFailed { + id: service_id, + reason: format!( + "Start failed: {}. 
Re-initialize also failed: {}", + e, init_err + ), + })?; + return self.get_service_info(service_id).await; + } } } @@ -1042,17 +2493,31 @@ impl ExternalServiceManager { } })?; - let service_instance = - self.create_service_instance(service.name.clone(), service_type_enum); + // Remote node — delegate to agent + if let Some(node_id) = service.node_id { + let client = self.get_remote_client(node_id).await?; + let container_name = + self.get_container_name_for_service(&service.name, &service_type_enum); - // Stop the service - service_instance - .stop() - .await - .map_err(|e| ExternalServiceError::StopFailed { - id: service_id, - reason: e.to_string(), + client.stop_service(&container_name).await.map_err(|e| { + ExternalServiceError::StopFailed { + id: service_id, + reason: e.to_string(), + } })?; + } else { + // Local node + let service_instance = + self.create_service_instance(service.name.clone(), service_type_enum); + + service_instance + .stop() + .await + .map_err(|e| ExternalServiceError::StopFailed { + id: service_id, + reason: e.to_string(), + })?; + } // Update status to stopped let mut service_update: external_services::ActiveModel = service.into(); @@ -2283,6 +3748,10 @@ impl ExternalServiceManager { connection_info: None, created_at: external_service.created_at.to_rfc3339(), updated_at: external_service.updated_at.to_rfc3339(), + node_id: external_service.node_id, + topology: external_service.topology, + members: Vec::new(), + error_message: external_service.error_message, }) } } @@ -2396,6 +3865,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("18".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let result = manager.create_service(request).await; @@ -2430,6 +3902,9 @@ mod tests { service_type: ServiceType::Redis, version: Some("7".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let result = manager.create_service(request).await; @@ -2462,6 +3937,9 @@ mod tests { service_type: ServiceType::S3, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let result = manager.create_service(request).await; @@ -2496,6 +3974,9 @@ mod tests { service_type: ServiceType::Postgres, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2532,6 +4013,9 @@ mod tests { service_type: ServiceType::Redis, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2575,6 +4059,9 @@ mod tests { service_type: ServiceType::Postgres, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2626,6 +4113,9 @@ mod tests { service_type: ServiceType::Redis, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2657,6 +4147,9 @@ mod tests { service_type: ServiceType::Redis, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2692,6 +4185,9 @@ mod tests { service_type: 
ServiceType::Redis, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2749,6 +4245,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("16".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2812,6 +4311,9 @@ mod tests { service_type: ServiceType::Postgres, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -2862,6 +4364,9 @@ mod tests { service_type: ServiceType::Postgres, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let result = manager.create_service(request).await; @@ -2949,6 +4454,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("18".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3029,6 +4537,9 @@ mod tests { service_type: ServiceType::Redis, version: Some("7".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; // Attempt to create the service - should fail @@ -3112,6 +4623,9 @@ mod tests { service_type: ServiceType::Postgres, version: None, parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager.create_service(request).await.unwrap(); @@ -3163,6 +4677,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("16".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3238,6 +4755,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("16".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3297,6 +4817,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("16".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3361,6 +4884,9 @@ mod tests { service_type: ServiceType::Postgres, version: Some("18".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3415,6 +4941,9 @@ mod tests { service_type: ServiceType::Redis, version: Some("7".to_string()), parameters: params, + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), }; let service = manager @@ -3764,6 +5293,10 @@ mod tests { connection_info: Some("postgresql://localhost:5432/postgres".to_string()), created_at: "2025-01-12T10:30:00Z".to_string(), updated_at: "2025-01-12T10:30:00Z".to_string(), + node_id: None, + topology: "standalone".to_string(), + members: Vec::new(), + error_message: None, }; assert_eq!(service_info.id, 1); diff --git a/crates/temps-proxy/Cargo.toml b/crates/temps-proxy/Cargo.toml index 04118a18..54ad1f70 100644 --- a/crates/temps-proxy/Cargo.toml +++ b/crates/temps-proxy/Cargo.toml @@ -40,6 +40,8 @@ pingora-openssl = "0.8.0" rand = { workspace = true } hex = "0.4" sha2 = "0.10" +hmac = "0.12" +argon2 = { workspace = true } serde_json = { workspace = true } uuid = { workspace = true } mime_guess = "2.0" 
@@ -58,6 +60,7 @@ woothee = "0.13" rustls = "0.23" rustls-pemfile = "2.0" openssl = "0.10" +url = { workspace = true } [dev-dependencies] tokio-test = "0.4" diff --git a/crates/temps-proxy/password_wall/password_form.html b/crates/temps-proxy/password_wall/password_form.html new file mode 100644 index 00000000..9ea842ad --- /dev/null +++ b/crates/temps-proxy/password_wall/password_form.html @@ -0,0 +1,281 @@ + + + + + + Password Required - {{PROJECT_NAME}} + + + +
[The markup of this 281-line template is not recoverable from this extract; only its text content survives. The page shows "{{PROJECT_NAME}} / {{ENVIRONMENT_NAME}}", the heading "Password Required", the message "This environment is protected. Enter the password to continue.", an error banner "Incorrect password. Please try again." toggled through the {{ERROR_DISPLAY}} and {{ERROR_INPUT_CLASS}} placeholders, and a password form that posts the entered password together with the hidden {{REDIRECT_PATH}} value to the verify endpoint.]
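For orientation, a minimal sketch of how the `password_wall` helpers introduced in the next hunk compose; the environment id, password, hash, and host below are illustrative values, not taken from the patch:

```rust
// Assumes the password_wall helpers defined below are in scope
// (e.g. via `use crate::handler::password_wall::*;`).
fn password_wall_round_trip(stored_argon2_hash: &str) {
    let env_id = 42;

    // POST /_temps/password-verify: check the submitted password against the
    // argon2 hash stored in the environment's deployment config.
    if verify_password("correct horse battery staple", stored_argon2_hash) {
        // On success the proxy answers 303 with a Set-Cookie header; the cookie
        // value is "<env_id>:<hmac-sha256 hex>", keyed by the password hash itself.
        let set_cookie =
            build_set_cookie_header(env_id, stored_argon2_hash, "app.example.com:443");
        assert!(set_cookie.starts_with("_temps_pw="));

        // Later requests present the cookie; validate_cookie recomputes the HMAC,
        // so changing the password (and therefore the hash) invalidates it.
        let cookie = create_cookie_value(env_id, stored_argon2_hash);
        assert!(validate_cookie(&cookie, env_id, stored_argon2_hash));
        assert!(!validate_cookie(&cookie, env_id, "some-other-hash"));
    }
}
```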
diff --git a/crates/temps-proxy/src/handler/mod.rs b/crates/temps-proxy/src/handler/mod.rs index 7e920c44..f0352207 100644 --- a/crates/temps-proxy/src/handler/mod.rs +++ b/crates/temps-proxy/src/handler/mod.rs @@ -3,5 +3,6 @@ pub mod captcha; #[allow(clippy::module_inception)] pub mod handler; pub mod ip_access_control; +pub mod password_wall; pub mod proxy_logs; pub mod types; diff --git a/crates/temps-proxy/src/handler/password_wall.rs b/crates/temps-proxy/src/handler/password_wall.rs new file mode 100644 index 00000000..4b685ac6 --- /dev/null +++ b/crates/temps-proxy/src/handler/password_wall.rs @@ -0,0 +1,187 @@ +//! Password wall handler for environment password protection. +//! +//! When an environment has password protection enabled, the proxy intercepts +//! requests and shows an HTML password form. After the user enters the correct +//! password, an HMAC-signed cookie is set so subsequent requests pass through. + +use hmac::{Hmac, Mac}; +use sha2::Sha256; + +/// Cookie name for password-protected environments +pub const PASSWORD_COOKIE_NAME: &str = "_temps_pw"; + +/// Cookie max age (7 days) +const COOKIE_MAX_AGE_SECS: u64 = 7 * 24 * 60 * 60; + +/// HTML template for the password form +const PASSWORD_FORM_HTML: &str = include_str!("../../password_wall/password_form.html"); + +type HmacSha256 = Hmac<Sha256>; + +/// Generate the password form HTML for a given redirect path. +pub fn generate_password_form_html( + redirect_path: &str, + show_error: bool, + project_name: &str, + environment_name: &str, +) -> String { + PASSWORD_FORM_HTML + .replace("{{REDIRECT_PATH}}", redirect_path) + .replace("{{PROJECT_NAME}}", &html_escape(project_name)) + .replace("{{ENVIRONMENT_NAME}}", &html_escape(environment_name)) + .replace( + "{{ERROR_DISPLAY}}", + if show_error { "flex" } else { "none" }, + ) + .replace( + "{{ERROR_INPUT_CLASS}}", + if show_error { "input-error" } else { "" }, + ) +} + +fn html_escape(s: &str) -> String { + s.replace('&', "&amp;") + .replace('<', "&lt;") + .replace('>', "&gt;") + .replace('"', "&quot;") +} + +/// Create an HMAC-signed cookie value for a given environment ID. +/// +/// The cookie value is `env_id:signature` where signature = HMAC-SHA256(env_id, secret). +/// The secret is derived from the password hash itself, so changing the password +/// invalidates all existing cookies. +pub fn create_cookie_value(environment_id: i32, password_hash: &str) -> String { + let payload = environment_id.to_string(); + let signature = compute_hmac(&payload, password_hash); + format!("{}:{}", payload, signature) +} + +/// Validate an HMAC-signed cookie value for a given environment ID. +pub fn validate_cookie(cookie_value: &str, environment_id: i32, password_hash: &str) -> bool { + let parts: Vec<&str> = cookie_value.splitn(2, ':').collect(); + if parts.len() != 2 { + return false; + } + + let payload = parts[0]; + let provided_signature = parts[1]; + + // Verify the environment ID matches + if payload != environment_id.to_string() { + return false; + } + + // Verify HMAC signature + let expected_signature = compute_hmac(payload, password_hash); + constant_time_eq(provided_signature.as_bytes(), expected_signature.as_bytes()) +} + +/// Verify a plaintext password against an argon2 hash.
+pub fn verify_password(password: &str, hash: &str) -> bool { + use argon2::{Argon2, PasswordHash, PasswordVerifier}; + let Ok(parsed_hash) = PasswordHash::new(hash) else { + return false; + }; + Argon2::default() + .verify_password(password.as_bytes(), &parsed_hash) + .is_ok() +} + +/// Build the Set-Cookie header value for a password protection cookie. +pub fn build_set_cookie_header(environment_id: i32, password_hash: &str, host: &str) -> String { + let value = create_cookie_value(environment_id, password_hash); + // Strip port from host for the domain + let domain = host.split(':').next().unwrap_or(host); + format!( + "{}={}; Path=/; Max-Age={}; HttpOnly; SameSite=Lax; Domain={}", + PASSWORD_COOKIE_NAME, value, COOKIE_MAX_AGE_SECS, domain + ) +} + +fn compute_hmac(data: &str, key: &str) -> String { + let mut mac = + HmacSha256::new_from_slice(key.as_bytes()).expect("HMAC can take key of any size"); + mac.update(data.as_bytes()); + hex::encode(mac.finalize().into_bytes()) +} + +/// Constant-time comparison to prevent timing attacks. +fn constant_time_eq(a: &[u8], b: &[u8]) -> bool { + if a.len() != b.len() { + return false; + } + a.iter() + .zip(b.iter()) + .fold(0u8, |acc, (x, y)| acc | (x ^ y)) + == 0 +} + +#[cfg(test)] +mod tests { + use super::*; + + const TEST_ENV_ID: i32 = 42; + const TEST_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$test_salt$test_hash_value"; + + #[test] + fn test_create_and_validate_cookie() { + let value = create_cookie_value(TEST_ENV_ID, TEST_HASH); + assert!(validate_cookie(&value, TEST_ENV_ID, TEST_HASH)); + } + + #[test] + fn test_validate_cookie_wrong_env_id() { + let value = create_cookie_value(TEST_ENV_ID, TEST_HASH); + assert!(!validate_cookie(&value, 99, TEST_HASH)); + } + + #[test] + fn test_validate_cookie_wrong_hash() { + let value = create_cookie_value(TEST_ENV_ID, TEST_HASH); + assert!(!validate_cookie(&value, TEST_ENV_ID, "different_hash")); + } + + #[test] + fn test_validate_cookie_tampered() { + assert!(!validate_cookie("42:bad_signature", TEST_ENV_ID, TEST_HASH)); + } + + #[test] + fn test_validate_cookie_malformed() { + assert!(!validate_cookie("garbage", TEST_ENV_ID, TEST_HASH)); + assert!(!validate_cookie("", TEST_ENV_ID, TEST_HASH)); + } + + #[test] + fn test_generate_password_form_html() { + let html = generate_password_form_html("/some/path", false, "My Project", "staging"); + assert!(html.contains("/_temps/password-verify")); + assert!(html.contains("/some/path")); + assert!(html.contains("My Project")); + assert!(html.contains("staging")); + assert!(html.contains("display: none")); + } + + #[test] + fn test_generate_password_form_html_with_error() { + let html = generate_password_form_html("/", true, "App", "production"); + assert!(html.contains("display: flex")); + assert!(html.contains("input-error")); + } + + #[test] + fn test_generate_password_form_html_escapes_html() { + let html = generate_password_form_html("/", false, "", "test"); + assert!(!html.contains("")); + assert!(html.contains("<script>")); + } + + #[test] + fn test_build_set_cookie_header() { + let header = build_set_cookie_header(TEST_ENV_ID, TEST_HASH, "example.com:443"); + assert!(header.starts_with("_temps_pw=")); + assert!(header.contains("Domain=example.com")); + assert!(header.contains("HttpOnly")); + } +} diff --git a/crates/temps-proxy/src/on_demand.rs b/crates/temps-proxy/src/on_demand.rs index 29aef449..4ec0d10d 100644 --- a/crates/temps-proxy/src/on_demand.rs +++ b/crates/temps-proxy/src/on_demand.rs @@ -16,6 +16,7 @@ use sea_orm::{ use 
std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; +use temps_core::OnDemandWaker; use temps_entities::{deployment_containers, environments}; use thiserror::Error; use tokio::sync::Notify; @@ -278,25 +279,58 @@ impl OnDemandManager { .all(self.db.as_ref()) .await?; - // Stop all containers in parallel + // Stop all containers in parallel, tracking failures let stop_futures: Vec<_> = containers .iter() .map(|c| { let container_id = c.container_id.clone(); let lifecycle = Arc::clone(&self.container_lifecycle); async move { - if let Err(e) = lifecycle.stop_container(&container_id).await { - warn!( - container_id = %container_id, - error = %e, - "Failed to stop container during sleep" - ); + match lifecycle.stop_container(&container_id).await { + Ok(()) => Ok(container_id), + Err(e) => { + warn!( + container_id = %container_id, + error = %e, + "Failed to stop container during sleep" + ); + Err((container_id, e)) + } } } }) .collect(); - futures::future::join_all(stop_futures).await; + let results = futures::future::join_all(stop_futures).await; + + let failed: Vec<_> = results.iter().filter(|r| r.is_err()).collect(); + if !failed.is_empty() { + // Some containers failed to stop — revert sleeping state to avoid + // inconsistency where DB says sleeping but containers are still running + error!( + environment_id = environment_id, + failed_count = failed.len(), + total = containers.len(), + "Failed to stop some containers during sleep, reverting sleeping state" + ); + let _ = self + .db + .execute(Statement::from_sql_and_values( + sea_orm::DatabaseBackend::Postgres, + "UPDATE environments SET sleeping = false WHERE id = $1", + [environment_id.into()], + )) + .await; + self.notify_route_change().await; + return Err(OnDemandError::ContainerOperation { + container_id: "multiple".to_string(), + reason: format!( + "Failed to stop {}/{} containers during sleep", + failed.len(), + containers.len() + ), + }); + } info!( environment_id = environment_id, @@ -612,6 +646,30 @@ impl OnDemandManager { } } +/// Bridge implementation so the proxy's OnDemandManager can be injected into +/// the environments handler AppState via the plugin system. 
+#[async_trait] +impl OnDemandWaker for OnDemandManager { + async fn wake_environment( + &self, + environment_id: i32, + wake_timeout_seconds: i32, + ) -> Result<(), Box> { + self.wake_environment(environment_id, wake_timeout_seconds) + .await + .map_err(|e| Box::new(e) as Box) + } + + async fn sleep_environment( + &self, + environment_id: i32, + ) -> Result> { + self.sleep_environment(environment_id) + .await + .map_err(|e| Box::new(e) as Box) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/temps-proxy/src/proxy.rs b/crates/temps-proxy/src/proxy.rs index 70cc2ce8..b314ff4c 100644 --- a/crates/temps-proxy/src/proxy.rs +++ b/crates/temps-proxy/src/proxy.rs @@ -2199,6 +2199,142 @@ impl ProxyHttp for LoadBalancer { )); } } + + // Password wall: check if environment has password protection enabled + let password_protection = project_ctx + .environment + .deployment_config + .as_ref() + .and_then(|dc| dc.security.as_ref()) + .and_then(|s| s.password_protection.as_ref()) + .filter(|pp| pp.enabled); + + if let Some(pp) = password_protection { + let password_hash = pp.password_hash.clone(); + let env_id = project_ctx.environment.id; + let project_name = &project_ctx.project.name; + let environment_name = &project_ctx.environment.name; + + // Check if this is the password verify POST endpoint + if ctx.path == "/_temps/password-verify" && ctx.method == "POST" { + // Read the POST body to get the password + let body = session.read_request_body().await.map_err(|e| { + error!("Failed to read password verify body: {}", e); + e + })?; + + let body_str = body + .as_ref() + .map(|b| String::from_utf8_lossy(b).to_string()) + .unwrap_or_default(); + + // Parse form data (application/x-www-form-urlencoded) + let params: Vec<(String, String)> = + url::form_urlencoded::parse(body_str.as_bytes()) + .into_owned() + .collect(); + + let password = params + .iter() + .find(|(k, _)| k == "password") + .map(|(_, v)| v.as_str()) + .unwrap_or(""); + + let redirect = params + .iter() + .find(|(k, _)| k == "redirect") + .map(|(_, v)| v.as_str()) + .unwrap_or("/"); + + if crate::handler::password_wall::verify_password(password, &password_hash) { + // Password correct — set cookie and redirect + let host = ctx.host.clone(); + let set_cookie = crate::handler::password_wall::build_set_cookie_header( + env_id, + &password_hash, + &host, + ); + + let mut resp = ResponseHeader::build(303, None)?; + resp.insert_header("Location", redirect)?; + resp.insert_header("Set-Cookie", &set_cookie)?; + resp.insert_header("Cache-Control", "no-store")?; + resp.insert_header("X-Request-ID", &ctx.request_id)?; + + session.write_response_header(Box::new(resp), true).await?; + ctx.routing_status = "password_verified".to_string(); + return Ok(true); + } else { + // Wrong password — show form again with error + let html = crate::handler::password_wall::generate_password_form_html( + redirect, + true, + project_name, + environment_name, + ); + let html_bytes = Bytes::from(html); + + let mut resp = ResponseHeader::build(StatusCode::OK, None)?; + resp.insert_header("Content-Type", "text/html; charset=utf-8")?; + resp.insert_header("Cache-Control", "no-store")?; + resp.insert_header("X-Request-ID", &ctx.request_id)?; + + session.write_response_header(Box::new(resp), false).await?; + session.write_response_body(Some(html_bytes), true).await?; + ctx.routing_status = "password_wrong".to_string(); + return Ok(true); + } + } + + // Check for valid password cookie + let has_valid_cookie = session + .req_header() + .headers + 
.get_all("Cookie") + .iter() + .filter_map(|h| h.to_str().ok()) + .flat_map(|s| Cookie::split_parse(s).filter_map(Result::ok)) + .find(|c| c.name() == crate::handler::password_wall::PASSWORD_COOKIE_NAME) + .map(|c| { + crate::handler::password_wall::validate_cookie( + c.value(), + env_id, + &password_hash, + ) + }) + .unwrap_or(false); + + if !has_valid_cookie { + // No valid cookie — show password form + let current_path = if let Some(ref qs) = ctx.query_string { + if qs.is_empty() { + ctx.path.clone() + } else { + format!("{}?{}", ctx.path, qs) + } + } else { + ctx.path.clone() + }; + + let html = crate::handler::password_wall::generate_password_form_html( + ¤t_path, + false, + project_name, + environment_name, + ); + let html_bytes = Bytes::from(html); + + let mut resp = ResponseHeader::build(StatusCode::OK, None)?; + resp.insert_header("Content-Type", "text/html; charset=utf-8")?; + resp.insert_header("Cache-Control", "no-store")?; + resp.insert_header("X-Request-ID", &ctx.request_id)?; + + session.write_response_header(Box::new(resp), false).await?; + session.write_response_body(Some(html_bytes), true).await?; + ctx.routing_status = "password_wall".to_string(); + return Ok(true); + } + } } else { ctx.routing_status = "no_project".to_string(); } diff --git a/crates/temps-wireguard/Cargo.toml b/crates/temps-wireguard/Cargo.toml index ae0b57f0..59b4b65b 100644 --- a/crates/temps-wireguard/Cargo.toml +++ b/crates/temps-wireguard/Cargo.toml @@ -16,3 +16,7 @@ serde_json = { workspace = true } rand = { workspace = true } base64 = { workspace = true } temps-core = { path = "../temps-core" } + +# Embedded WireGuard (no external wireguard-tools dependency) +defguard_wireguard_rs = "0.9" +x25519-dalek = { version = "2.0", features = ["static_secrets"] } diff --git a/crates/temps-wireguard/src/lib.rs b/crates/temps-wireguard/src/lib.rs index ca48d7e2..f2e21023 100644 --- a/crates/temps-wireguard/src/lib.rs +++ b/crates/temps-wireguard/src/lib.rs @@ -1,20 +1,21 @@ //! WireGuard mesh networking for Temps multi-node deployments. //! -//! Wraps the `wg` and `ip` CLI commands to manage WireGuard interfaces -//! and peer connections. WireGuard is in-kernel on Linux 5.6+, so no -//! additional installation is required on modern Linux systems. +//! Uses `defguard_wireguard_rs` for embedded userspace WireGuard — no external +//! `wireguard-tools` package or kernel module required. The WireGuard protocol +//! runs in-process via boringtun (Cloudflare's Rust implementation). 
+use base64::{engine::general_purpose::STANDARD as BASE64, Engine}; use serde::{Deserialize, Serialize}; use std::net::Ipv4Addr; use thiserror::Error; #[derive(Error, Debug)] pub enum WireGuardError { - #[error("WireGuard command failed: {command} — {reason}")] - CommandFailed { command: String, reason: String }, + #[error("WireGuard operation failed: {operation} — {reason}")] + OperationFailed { operation: String, reason: String }, - #[error("WireGuard not available on this system: {0}")] - NotAvailable(String), + #[error("WireGuard interface error: {0}")] + InterfaceError(String), #[error("No available IP addresses in subnet {subnet}")] SubnetExhausted { subnet: String }, @@ -22,7 +23,7 @@ pub enum WireGuardError { #[error("Invalid configuration: {0}")] InvalidConfig(String), - #[error("IO error running WireGuard command: {0}")] + #[error("IO error: {0}")] Io(#[from] std::io::Error), #[error("Interface {interface} already exists")] @@ -51,6 +52,9 @@ pub struct WireGuardKeypair { } /// Manages a WireGuard interface for the Temps mesh network. +/// +/// Uses embedded userspace WireGuard via defguard/boringtun — no external +/// `wg` or `ip` CLI tools required. #[derive(Debug)] pub struct WireGuardManager { /// Interface name, e.g. "wg0" @@ -108,56 +112,22 @@ impl WireGuardManager { Self::new("wg0", &subnet, port) } - /// Check if WireGuard CLI tools are available on this system. + /// Check if WireGuard is available. + /// + /// With embedded userspace WireGuard this always succeeds — no external tools needed. pub async fn check_available(&self) -> Result<(), WireGuardError> { - let output = tokio::process::Command::new("wg") - .arg("--version") - .output() - .await - .map_err(|e| WireGuardError::NotAvailable(format!("Failed to run 'wg': {}", e)))?; - - if !output.status.success() { - return Err(WireGuardError::NotAvailable( - "wg command returned non-zero exit code".into(), - )); - } - Ok(()) } - /// Generate a new WireGuard keypair using `wg genkey` and `wg pubkey`. + /// Generate a new WireGuard keypair using pure Rust cryptography. + /// + /// Uses x25519-dalek for Curve25519 key generation — no `wg genkey` needed. pub async fn generate_keypair(&self) -> Result { - let genkey_output = tokio::process::Command::new("wg") - .arg("genkey") - .output() - .await?; - - if !genkey_output.status.success() { - return Err(WireGuardError::CommandFailed { - command: "wg genkey".into(), - reason: String::from_utf8_lossy(&genkey_output.stderr).to_string(), - }); - } - - let private_key = String::from_utf8_lossy(&genkey_output.stdout) - .trim() - .to_string(); - - // Pipe private key through wg pubkey - let child = tokio::process::Command::new("sh") - .arg("-c") - .arg(format!("echo '{}' | wg pubkey", private_key)) - .output() - .await?; - - if !child.status.success() { - return Err(WireGuardError::CommandFailed { - command: "wg pubkey".into(), - reason: String::from_utf8_lossy(&child.stderr).to_string(), - }); - } + let secret = x25519_dalek::StaticSecret::random_from_rng(rand::rngs::OsRng); + let public = x25519_dalek::PublicKey::from(&secret); - let public_key = String::from_utf8_lossy(&child.stdout).trim().to_string(); + let private_key = BASE64.encode(secret.as_bytes()); + let public_key = BASE64.encode(public.as_bytes()); Ok(WireGuardKeypair { private_key, @@ -167,58 +137,61 @@ impl WireGuardManager { /// Initialize the WireGuard interface with the given IP address and private key. /// - /// Creates the interface, assigns the IP, sets the private key, and brings it up. 
+ /// Creates a userspace WireGuard interface via defguard/boringtun. + /// No external `wg` or `ip` CLI tools are needed. pub async fn init_interface( &self, ip: Ipv4Addr, private_key: &str, ) -> Result<(), WireGuardError> { - // Create the WireGuard interface - self.run_command( - "ip", - &["link", "add", "dev", &self.interface, "type", "wireguard"], - ) - .await?; - - // Assign IP address - let addr = format!("{}/{}", ip, self.subnet_mask); - self.run_command("ip", &["address", "add", "dev", &self.interface, &addr]) - .await?; - - // Write private key to a temporary file and configure - let key_path = format!("/tmp/temps-wg-{}.key", self.interface); - tokio::fs::write(&key_path, private_key).await?; - - // Set permissions - self.run_command("chmod", &["600", &key_path]).await?; - - // Configure WireGuard with private key and listen port - let port_str = self.listen_port.to_string(); - self.run_command( - "wg", - &[ - "set", - &self.interface, - "listen-port", - &port_str, - "private-key", - &key_path, - ], - ) - .await?; - - // Clean up key file - let _ = tokio::fs::remove_file(&key_path).await; - - // Bring the interface up - self.run_command("ip", &["link", "set", "up", "dev", &self.interface]) - .await?; + use defguard_wireguard_rs::{ + InterfaceConfiguration, Userspace, WGApi, WireguardInterfaceApi, + }; + use std::str::FromStr; + + let mut wgapi = WGApi::::new(self.interface.clone()).map_err(|e| { + WireGuardError::InterfaceError(format!( + "Failed to create WireGuard API for {}: {}", + self.interface, e + )) + })?; + + // Create the userspace WireGuard interface (TUN device via boringtun) + wgapi.create_interface().map_err(|e| { + WireGuardError::InterfaceError(format!( + "Failed to create interface {}: {}", + self.interface, e + )) + })?; + + // Configure the interface with private key, port, and address + let addr_str = format!("{}/{}", ip, self.subnet_mask); + let address = defguard_wireguard_rs::net::IpAddrMask::from_str(&addr_str).map_err(|e| { + WireGuardError::InvalidConfig(format!("Invalid address {}: {}", addr_str, e)) + })?; + + let config = InterfaceConfiguration { + name: self.interface.clone(), + prvkey: private_key.to_string(), + addresses: vec![address], + port: self.listen_port, + peers: Vec::new(), + mtu: None, + fwmark: None, + }; + + wgapi.configure_interface(&config).map_err(|e| { + WireGuardError::InterfaceError(format!( + "Failed to configure interface {}: {}", + self.interface, e + )) + })?; tracing::info!( interface = %self.interface, ip = %ip, port = %self.listen_port, - "WireGuard interface initialized" + "WireGuard interface initialized (embedded userspace)" ); Ok(()) @@ -226,26 +199,53 @@ impl WireGuardManager { /// Add a peer to the WireGuard interface. 
@@ -226,26 +199,53 @@ impl WireGuardManager {
     /// Add a peer to the WireGuard interface.
     pub async fn add_peer(&self, peer: &WireGuardPeer) -> Result<(), WireGuardError> {
-        let mut args = vec![
-            "set",
-            &self.interface,
-            "peer",
-            &peer.public_key,
-            "allowed-ips",
-            &peer.allowed_ips,
-        ];
-
-        // Only set endpoint if provided (peer may be behind NAT)
+        use defguard_wireguard_rs::{Userspace, WGApi, WireguardInterfaceApi};
+
+        let wgapi = WGApi::<Userspace>::new(self.interface.clone()).map_err(|e| {
+            WireGuardError::InterfaceError(format!(
+                "Failed to create WireGuard API for {}: {}",
+                self.interface, e
+            ))
+        })?;
+
+        // Parse the base64 public key into a Key
+        let key: defguard_wireguard_rs::key::Key =
+            peer.public_key.as_str().try_into().map_err(|e| {
+                WireGuardError::InvalidConfig(format!(
+                    "Invalid peer public key '{}': {:?}",
+                    peer.public_key, e
+                ))
+            })?;
+
+        let mut wg_peer = defguard_wireguard_rs::peer::Peer::new(key);
+
+        // Parse endpoint
         if !peer.endpoint.is_empty() {
-            args.push("endpoint");
-            args.push(&peer.endpoint);
+            wg_peer.set_endpoint(&peer.endpoint).map_err(|e| {
+                WireGuardError::InvalidConfig(format!(
+                    "Invalid peer endpoint '{}': {}",
+                    peer.endpoint, e
+                ))
+            })?;
+        }
+
+        // Parse allowed IPs
+        if let Ok(addr_mask) = peer
+            .allowed_ips
+            .parse::<defguard_wireguard_rs::net::IpAddrMask>()
+        {
+            wg_peer.allowed_ips.push(addr_mask);
         }
 
         // Enable persistent keepalive for NAT traversal
-        args.push("persistent-keepalive");
-        args.push("25");
+        wg_peer.persistent_keepalive_interval = Some(25);
 
-        self.run_command("wg", &args).await?;
+        wgapi
+            .configure_peer(&wg_peer)
+            .map_err(|e| WireGuardError::OperationFailed {
+                operation: format!("add peer {}", peer.public_key),
+                reason: format!("{}", e),
+            })?;
 
         tracing::info!(
             interface = %self.interface,
@@ -260,11 +260,25 @@ impl WireGuardManager {
     /// Remove a peer from the WireGuard interface.
     pub async fn remove_peer(&self, public_key: &str) -> Result<(), WireGuardError> {
-        self.run_command(
-            "wg",
-            &["set", &self.interface, "peer", public_key, "remove"],
-        )
-        .await?;
+        use defguard_wireguard_rs::{Userspace, WGApi, WireguardInterfaceApi};
+
+        let wgapi = WGApi::<Userspace>::new(self.interface.clone()).map_err(|e| {
+            WireGuardError::InterfaceError(format!(
+                "Failed to create WireGuard API for {}: {}",
+                self.interface, e
+            ))
+        })?;
+
+        let key: defguard_wireguard_rs::key::Key = public_key.try_into().map_err(|e| {
+            WireGuardError::InvalidConfig(format!("Invalid public key '{}': {:?}", public_key, e))
+        })?;
+
+        wgapi
+            .remove_peer(&key)
+            .map_err(|e| WireGuardError::OperationFailed {
+                operation: format!("remove peer {}", public_key),
+                reason: format!("{}", e),
+            })?;
 
         tracing::info!(
             interface = %self.interface,
@@ -277,8 +291,21 @@ impl WireGuardManager {
     /// Tear down the WireGuard interface.
     pub async fn destroy_interface(&self) -> Result<(), WireGuardError> {
-        self.run_command("ip", &["link", "del", "dev", &self.interface])
-            .await?;
+        use defguard_wireguard_rs::{Userspace, WGApi, WireguardInterfaceApi};
+
+        let wgapi = WGApi::<Userspace>::new(self.interface.clone()).map_err(|e| {
+            WireGuardError::InterfaceError(format!(
+                "Failed to create WireGuard API for {}: {}",
+                self.interface, e
+            ))
+        })?;
+
+        wgapi.remove_interface().map_err(|e| {
+            WireGuardError::InterfaceError(format!(
+                "Failed to remove interface {}: {}",
+                self.interface, e
+            ))
+        })?;
 
         tracing::info!(
             interface = %self.interface,
@@ -321,24 +348,6 @@ impl WireGuardManager {
     pub fn listen_port(&self) -> u16 {
         self.listen_port
     }
-
-    /// Run a system command and return an error if it fails.
-    async fn run_command(&self, program: &str, args: &[&str]) -> Result<(), WireGuardError> {
-        let output = tokio::process::Command::new(program)
-            .args(args)
-            .output()
-            .await?;
-
-        if !output.status.success() {
-            let stderr = String::from_utf8_lossy(&output.stderr).to_string();
-            return Err(WireGuardError::CommandFailed {
-                command: format!("{} {}", program, args.join(" ")),
-                reason: stderr,
-            });
-        }
-
-        Ok(())
-    }
 }
 
 #[cfg(test)]
@@ -404,4 +413,43 @@ mod tests {
         assert_eq!(deserialized.endpoint, peer.endpoint);
         assert_eq!(deserialized.allowed_ips, peer.allowed_ips);
     }
+
+    #[tokio::test]
+    async fn test_generate_keypair_produces_valid_keys() {
+        let manager = WireGuardManager::new("wg0", "10.100.0.0/24", 51820).unwrap();
+        let keypair = manager.generate_keypair().await.unwrap();
+
+        // Keys should be valid base64
+        let private_bytes = BASE64.decode(&keypair.private_key).unwrap();
+        let public_bytes = BASE64.decode(&keypair.public_key).unwrap();
+
+        // Keys should be 32 bytes (Curve25519)
+        assert_eq!(private_bytes.len(), 32);
+        assert_eq!(public_bytes.len(), 32);
+
+        // Public key should derive from private key
+        let secret = x25519_dalek::StaticSecret::from(
+            <[u8; 32]>::try_from(private_bytes.as_slice()).unwrap(),
+        );
+        let expected_public = x25519_dalek::PublicKey::from(&secret);
+        assert_eq!(public_bytes, expected_public.as_bytes());
+    }
+
+    #[tokio::test]
+    async fn test_generate_keypair_unique() {
+        let manager = WireGuardManager::new("wg0", "10.100.0.0/24", 51820).unwrap();
+        let kp1 = manager.generate_keypair().await.unwrap();
+        let kp2 = manager.generate_keypair().await.unwrap();
+
+        // Two keypairs should be different
+        assert_ne!(kp1.private_key, kp2.private_key);
+        assert_ne!(kp1.public_key, kp2.public_key);
+    }
+
+    #[tokio::test]
+    async fn test_check_available_always_succeeds() {
+        let manager = WireGuardManager::new("wg0", "10.100.0.0/24", 51820).unwrap();
+        // Embedded WireGuard is always available
+        assert!(manager.check_available().await.is_ok());
+    }
 }
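Continuing the sketch above, peer management and teardown go through the same embedded API. Again this is illustrative and not part of the diff; the `WireGuardPeer` literal assumes the struct is exactly the three `String` fields that `add_peer` reads, which may not be its full definition.

```rust
// Hypothetical usage sketch, not part of this change. The WireGuardPeer field
// set (public_key / endpoint / allowed_ips as Strings) is inferred from how
// add_peer() uses it; the real struct may carry additional fields.
async fn manage_peer(
    manager: &WireGuardManager,
    peer_public_key: &str,
) -> Result<(), WireGuardError> {
    let peer = WireGuardPeer {
        public_key: peer_public_key.to_string(),
        endpoint: "203.0.113.10:51820".to_string(), // empty string means the peer is behind NAT
        allowed_ips: "10.100.0.2/32".to_string(),
    };

    // configure_peer() under the hood; a persistent keepalive of 25s is applied automatically.
    manager.add_peer(&peer).await?;

    // Later: drop the peer, and on shutdown remove the userspace interface entirely.
    manager.remove_peer(peer_public_key).await?;
    manager.destroy_interface().await?;

    Ok(())
}
```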
diff --git a/web/src/api/client/@tanstack/react-query.gen.ts b/web/src/api/client/@tanstack/react-query.gen.ts
index 93c318ec..78899552 100644
--- a/web/src/api/client/@tanstack/react-query.gen.ts
+++ b/web/src/api/client/@tanstack/react-query.gen.ts
@@ -1,8 +1,8 @@
 // This file is auto-generated by @hey-api/openapi-ts
-import { type Options, getPlatformInfo, chunkUploadOptions, createRelease, listReleaseFiles, uploadReleaseFile, recordEventMetrics, addSessionReplayEvents, initSessionReplay, recordSpeedMetrics, updateSpeedMetrics, getPricing, listProviderKeys, createProviderKey, testProviderKeyInline, deleteProviderKey, updateProviderKey, testProviderKeyById, getUsageByProvider, getConversations, getConversationDetail, getUsageRecent, getUsageSummary, getUsageTimeseries, getUsageTopModels, chatCompletions, embeddings, listModels, getActiveVisitors, getEventDetail, getEventVisitors, getEventsCount, getGeneralStats, getLiveVisitorsList, getPageFlow, getPageHourlySessions, getPagePathDetail, getPagePathVisitors, getPagePaths, getPagePathsSparklines, getRecentActivity, getSessionDetails, getSessionEvents, getSessionLogs, getVisitors, getVisitorByGuid, getVisitorById, getVisitorDetails, enrichVisitor, getVisitorInfo, getVisitorJourney, getVisitorSessions, getVisitorStats, listApiKeys, createApiKey, getApiKeyPermissions, deleteApiKey, getApiKey, updateApiKey, activateApiKey, deactivateApiKey, emailStatus, login, requestMagicLink, verifyMagicLink, requestPasswordReset, resetPassword, verifyEmail, verifyMfaChallenge,
runExternalServiceBackup, listS3Sources, createS3Source, deleteS3Source, getS3Source, updateS3Source, listSourceBackups, runBackupForSource, listBackupSchedules, createBackupSchedule, deleteBackupSchedule, getBackupSchedule, listBackupsForSchedule, disableBackupSchedule, enableBackupSchedule, getBackup, blobDelete, blobList, blobPut, blobCopy, blobDisable, blobEnable, blobStatus, blobUpdate, blobDownload, getDashboardProjectsAnalytics, getActivityGraph, getScanByDeployment, listProviders, createProvider, deleteProvider, getProvider, updateProvider, listManagedDomains, addManagedDomain, testProviderConnection, listProviderZones, removeManagedDomain, verifyManagedDomain, lookupDnsARecords, listDomains, createDomain, getDomainByHost, cancelDomainOrder, getDomainOrder, createOrRecreateOrder, finalizeOrder, setupDnsChallenge, deleteDomain, getDomainById, getChallengeToken, getHttpChallengeDebug, provisionDomain, renewDomain, checkDomainStatus, listDomains2, createDomain2, getDomainByName, deleteDomain2, getDomain, getDomainDnsRecords, setupDns, verifyDomain, listProviders2, createProvider2, deleteProvider2, getProvider2, testProvider, listEmails, sendEmail, getEmailStats, validateEmail, getEmail, listServices, createService, listAvailableContainers, getServiceBySlug, importExternalService, listProjectServices, getProjectServiceEnvironmentVariables, getProvidersMetadata, getProviderMetadata, getServiceTypes, getServiceTypeParameters, deleteService, getService, updateService, getServicePreviewEnvironmentVariablesMasked, getServicePreviewEnvironmentVariableNames, listServiceProjects, linkServiceToProject, unlinkServiceFromProject, getServiceEnvironmentVariables, getServiceEnvironmentVariable, startService, stopService, upgradeService, listRootContainers, listContainersAtPath, listEntities, getEntityInfo, queryData, downloadObject, getContainerInfo, checkExplorerSupport, getFile, getIpGeolocation, listConnections, deleteConnection, activateConnection, deactivateConnection, listRepositoriesByConnection, syncRepositories, updateConnectionToken, validateConnection, listGitProviders, createGitProvider, createGithubPatProvider, createGitlabOauthProvider, createGitlabPatProvider, deleteProvider3, getGitProvider, activateProvider, handleGitProviderOauthCallback, getProviderConnections, deactivateProvider, checkProviderDeletionSafety, startGitProviderOauth, deleteProviderSafely, getPublicRepository, getPublicBranches, detectPublicPresets, discoverWorkloads, executeImport, createPlan, listSources, getImportStatus, getIncident, updateIncidentStatus, getIncidentUpdates, adminListNodes, registerNode, adminRemoveNode, adminGetNode, adminListNodeContainers, adminUndrainNode, adminDrainStatus, adminDrainNode, nodeHeartbeat, listIpAccessControl, createIpAccessControl, checkIpBlocked, deleteIpAccessControl, getIpAccessControl, updateIpAccessControl, kvDel, kvDisable, kvEnable, kvExpire, kvGet, kvIncr, kvKeys, kvSet, kvStatus, kvTtl, kvUpdate, listRoutes, createRoute, deleteRoute, getRoute, updateRoute, logout, getLogContext, searchLogs, tailLogs, deleteMonitor, getMonitor, getBucketedStatus, getCurrentMonitorStatus, getUptimeHistory, deletePreferences, getPreferences, updatePreferences, listNotificationProviders, createNotificationProvider, createEmailProvider, updateEmailProvider, createSlackProvider, updateSlackProvider, createWebhookProvider, updateWebhookProvider, deleteProvider4, getNotificationProvider, updateProvider2, testProvider2, listOrders, queryGenaiTraces, getGenaiTrace, getHealth, listInsights, 
queryLogs, listMetricNames, queryMetrics, getPipelineStats, getQuota, queryTraceSummaries, queryTraces, getTrace, ingestLogs, ingestMetrics, ingestTraces, ingestLogsByPath, ingestMetricsByPath, ingestTracesByPath, hasPerformanceMetrics, getPerformanceMetrics, getMetricsOverTime, getGroupedPageMetrics, getAccessInfo, getPrivateIp, getPublicIp, listPresets, generatePresetDockerfile, getProjects, createProject, getProjectBySlug, createProjectFromTemplate, getProjectStatistics, deleteProject, getProject, updateProject, getProjectDeployments, getLastDeployment, triggerProjectPipeline, getActiveVisitors2, getAggregatedBuckets, updateAutomaticDeploy, listCustomDomainsForProject, createCustomDomain, deleteCustomDomain, getCustomDomain, updateCustomDomain, linkCustomDomainToCertificate, updateProjectDeploymentConfig, getDeployment, cancelDeployment, getDeploymentJobs, getDeploymentJobLogs, tailDeploymentJobLogs, getDeploymentOperations, executeDeploymentOperation, getDeploymentOperationStatus, pauseDeployment, promoteDeployment, resumeDeployment, rollbackToDeployment, teardownDeployment, listDsns, createDsn, getOrCreateDsn, regenerateDsn, revokeDsn, getEnvironmentVariables, createEnvironmentVariable, getEnvironmentVariableValue, deleteEnvironmentVariable, updateEnvironmentVariable, getEnvironments, createEnvironment, deleteEnvironment, getEnvironment, getEnvironmentCrons, getCronById, getCronExecutions, getEnvironmentDomains, addEnvironmentDomain, deleteEnvironmentDomain, updateEnvironmentSettings, sleepEnvironment, teardownEnvironment, wakeEnvironment, getContainerLogs, listContainers, getContainerDetail, getContainerLogsById, getContainerMetrics, streamContainerMetrics, restartContainer, startContainer, stopContainer, deployFromImage, deployFromImageUpload, deployFromStatic, getErrorDashboardStats, listErrorGroups, getErrorGroup, updateErrorGroup, listErrorEvents, getErrorEvent, getErrorStats, getErrorTimeSeries, getEventsCount2, getEventTypeBreakdown, recordConsoleEvent, getPropertyBreakdown, getPropertyTimeline, getEventsTimeline, getUniqueEvents, listExternalImages, registerExternalImage, deleteExternalImage, getExternalImage, listFunnels, createFunnel, previewFunnelMetrics, deleteFunnel, updateFunnel, getFunnelMetrics, updateGitSettings, hasErrorGroups, hasAnalyticsEvents, getHourlyVisits, listExternalImages2, pushExternalImage, getExternalImage2, listIncidents, createIncident, getBucketedIncidents, purgeProjectLogs, listMonitors, createMonitor, deleteReleaseSourceMaps, listSourceMaps, uploadSourceMap, updateProjectSettings, listReleases, deleteSourceMap, listStaticBundles, deleteStaticBundle, getStaticBundle, getStatusOverview, getUniqueCounts, uploadStaticBundle, listProjectScans, triggerScan, getLatestScansPerEnvironment, getLatestScan, listWebhooks, createWebhook, deleteWebhook, getWebhook, updateWebhook, listDeliveries, getDelivery, retryDelivery, getProxyLogs, getProxyLogByRequestId, getTimeBucketStats, getTodayStats, getProxyLogById, listSyncedRepositories, getRepositoryByName, getAllRepositoriesByName, getRepositoryPresetByName, getRepositoryBranches, getRepositoryTags, getRepositoryPresetLive, getBranchesByRepositoryId, listCommitsByRepositoryId, checkCommitExists, getTagsByRepositoryId, getProjectSessionReplays, getSessionEvents2, getSettings, updateSettings, revokeJoinToken, generateJoinToken, getJoinTokenStatus, listTemplates, listTemplateTags, getTemplate, getCurrentUser, listUsers, createUser, updateSelf, disableMfa, setupMfa, verifyAndEnableMfa, deleteUser, updateUser, 
restoreUser, assignRole, removeRole, getVisitorSessions2, deleteSessionReplay, getSessionReplay, updateSessionDuration, getSessionReplayEvents, addEvents, deleteScan, getScan, getScanVulnerabilities, listEventTypes, triggerWeeklyDigest, listExternalPlugins, reloadPlugins, ingestSentryEnvelope, ingestSentryEvent, listAuditLogs, getAuditLog } from '../sdk.gen'; +import { type Options, getPlatformInfo, chunkUploadOptions, createRelease, listReleaseFiles, uploadReleaseFile, recordEventMetrics, addSessionReplayEvents, initSessionReplay, recordSpeedMetrics, updateSpeedMetrics, getPricing, listProviderKeys, createProviderKey, testProviderKeyInline, deleteProviderKey, updateProviderKey, testProviderKeyById, getUsageByProvider, getConversations, getConversationDetail, getUsageRecent, getUsageSummary, getUsageTimeseries, getUsageTopModels, chatCompletions, embeddings, listModels, getActiveVisitors, getEventDetail, getEventVisitors, getEventsCount, getGeneralStats, getLiveVisitorsList, getPageFlow, getPageHourlySessions, getPagePathDetail, getPagePathVisitors, getPagePaths, getPagePathsSparklines, getRecentActivity, getSessionDetails, getSessionEvents, getSessionLogs, getVisitors, getVisitorByGuid, getVisitorById, getVisitorDetails, enrichVisitor, getVisitorInfo, getVisitorJourney, getVisitorSessions, getVisitorStats, listApiKeys, createApiKey, getApiKeyPermissions, deleteApiKey, getApiKey, updateApiKey, activateApiKey, deactivateApiKey, emailStatus, login, requestMagicLink, verifyMagicLink, requestPasswordReset, resetPassword, verifyEmail, verifyMfaChallenge, runExternalServiceBackup, listS3Sources, createS3Source, deleteS3Source, getS3Source, updateS3Source, listSourceBackups, runBackupForSource, listBackupSchedules, createBackupSchedule, deleteBackupSchedule, getBackupSchedule, listBackupsForSchedule, disableBackupSchedule, enableBackupSchedule, getBackup, blobDelete, blobList, blobPut, blobCopy, blobDisable, blobEnable, blobStatus, blobUpdate, blobDownload, getDashboardProjectsAnalytics, getActivityGraph, getScanByDeployment, listProviders, createProvider, deleteProvider, getProvider, updateProvider, listManagedDomains, addManagedDomain, testProviderConnection, listProviderZones, removeManagedDomain, verifyManagedDomain, lookupDnsARecords, listDomains, createDomain, getDomainByHost, cancelDomainOrder, getDomainOrder, createOrRecreateOrder, finalizeOrder, setupDnsChallenge, deleteDomain, getDomainById, getChallengeToken, getHttpChallengeDebug, provisionDomain, renewDomain, checkDomainStatus, listDomains2, createDomain2, getDomainByName, deleteDomain2, getDomain, getDomainDnsRecords, setupDns, verifyDomain, listProviders2, createProvider2, deleteProvider2, getProvider2, testProvider, listEmails, sendEmail, getEmailStats, validateEmail, getEmail, listServices, createService, listAvailableContainers, getServiceBySlug, importExternalService, listProjectServices, getProjectServiceEnvironmentVariables, getProvidersMetadata, getProviderMetadata, getServiceTypes, getServiceTypeParameters, deleteService, getService, updateService, getServicePreviewEnvironmentVariablesMasked, getServicePreviewEnvironmentVariableNames, listServiceProjects, linkServiceToProject, unlinkServiceFromProject, getServiceEnvironmentVariables, getServiceEnvironmentVariable, startService, stopService, upgradeService, listRootContainers, listContainersAtPath, listEntities, getEntityInfo, queryData, downloadObject, getContainerInfo, checkExplorerSupport, getFile, getIpGeolocation, listConnections, deleteConnection, activateConnection, 
deactivateConnection, listRepositoriesByConnection, syncRepositories, updateConnectionToken, validateConnection, listGitProviders, createGitProvider, createGithubPatProvider, createGitlabOauthProvider, createGitlabPatProvider, deleteProvider3, getGitProvider, activateProvider, handleGitProviderOauthCallback, getProviderConnections, deactivateProvider, checkProviderDeletionSafety, startGitProviderOauth, deleteProviderSafely, getPublicRepository, getPublicBranches, detectPublicPresets, discoverWorkloads, executeImport, createPlan, listSources, getImportStatus, getIncident, updateIncidentStatus, getIncidentUpdates, adminListNodes, registerNode, adminRemoveNode, adminGetNode, adminListNodeContainers, adminUndrainNode, adminDrainStatus, adminDrainNode, nodeHeartbeat, getS3Credentials, listIpAccessControl, createIpAccessControl, checkIpBlocked, deleteIpAccessControl, getIpAccessControl, updateIpAccessControl, kvDel, kvDisable, kvEnable, kvExpire, kvGet, kvIncr, kvKeys, kvSet, kvStatus, kvTtl, kvUpdate, listRoutes, createRoute, deleteRoute, getRoute, updateRoute, logout, getLogContext, searchLogs, tailLogs, deleteMonitor, getMonitor, getBucketedStatus, getCurrentMonitorStatus, getUptimeHistory, deletePreferences, getPreferences, updatePreferences, listNotificationProviders, createNotificationProvider, createEmailProvider, updateEmailProvider, createSlackProvider, updateSlackProvider, createWebhookProvider, updateWebhookProvider, deleteProvider4, getNotificationProvider, updateProvider2, testProvider2, listOrders, queryGenaiTraces, getGenaiTrace, getHealth, listInsights, queryLogs, listMetricNames, queryMetrics, getPipelineStats, getQuota, queryTraceSummaries, queryTraces, getTrace, ingestLogs, ingestMetrics, ingestTraces, ingestLogsByPath, ingestMetricsByPath, ingestTracesByPath, hasPerformanceMetrics, getPerformanceMetrics, getMetricsOverTime, getGroupedPageMetrics, getAccessInfo, getPrivateIp, getPublicIp, listPresets, generatePresetDockerfile, getProjects, createProject, getProjectBySlug, createProjectFromTemplate, getProjectStatistics, deleteProject, getProject, updateProject, getProjectDeployments, getLastDeployment, triggerProjectPipeline, getActiveVisitors2, getAggregatedBuckets, updateAutomaticDeploy, listCustomDomainsForProject, createCustomDomain, deleteCustomDomain, getCustomDomain, updateCustomDomain, linkCustomDomainToCertificate, updateProjectDeploymentConfig, getDeployment, cancelDeployment, getDeploymentJobs, getDeploymentJobLogs, tailDeploymentJobLogs, getDeploymentOperations, executeDeploymentOperation, getDeploymentOperationStatus, pauseDeployment, promoteDeployment, resumeDeployment, rollbackToDeployment, teardownDeployment, listDsns, createDsn, getOrCreateDsn, regenerateDsn, revokeDsn, getEnvironmentVariables, createEnvironmentVariable, getEnvironmentVariableValue, deleteEnvironmentVariable, updateEnvironmentVariable, getEnvironments, createEnvironment, deleteEnvironment, getEnvironment, getEnvironmentCrons, getCronById, getCronExecutions, getEnvironmentDomains, addEnvironmentDomain, deleteEnvironmentDomain, updateEnvironmentSettings, sleepEnvironment, teardownEnvironment, wakeEnvironment, getContainerLogs, listContainers, getContainerDetail, getContainerLogsById, getContainerMetrics, streamContainerMetrics, restartContainer, startContainer, stopContainer, deployFromImage, deployFromImageUpload, deployFromStatic, getErrorDashboardStats, listErrorGroups, getErrorGroup, updateErrorGroup, listErrorEvents, getErrorEvent, getErrorStats, getErrorTimeSeries, getEventsCount2, 
getEventTypeBreakdown, recordConsoleEvent, getPropertyBreakdown, getPropertyTimeline, getEventsTimeline, getUniqueEvents, listExternalImages, registerExternalImage, deleteExternalImage, getExternalImage, listFunnels, createFunnel, previewFunnelMetrics, deleteFunnel, updateFunnel, getFunnelMetrics, updateGitSettings, hasErrorGroups, hasAnalyticsEvents, getHourlyVisits, listExternalImages2, pushExternalImage, getExternalImage2, listIncidents, createIncident, getBucketedIncidents, purgeProjectLogs, listMonitors, createMonitor, deleteReleaseSourceMaps, listSourceMaps, uploadSourceMap, updateProjectSettings, listReleases, deleteSourceMap, listStaticBundles, deleteStaticBundle, getStaticBundle, getStatusOverview, getUniqueCounts, uploadStaticBundle, listProjectScans, triggerScan, getLatestScansPerEnvironment, getLatestScan, listWebhooks, createWebhook, deleteWebhook, getWebhook, updateWebhook, listDeliveries, getDelivery, retryDelivery, getProxyLogs, getProxyLogByRequestId, getTimeBucketStats, getTodayStats, getProxyLogById, listSyncedRepositories, getRepositoryByName, getAllRepositoriesByName, getRepositoryPresetByName, getRepositoryBranches, getRepositoryTags, getRepositoryPresetLive, getBranchesByRepositoryId, listCommitsByRepositoryId, checkCommitExists, getTagsByRepositoryId, getProjectSessionReplays, getSessionEvents2, getSettings, updateSettings, revokeJoinToken, generateJoinToken, getJoinTokenStatus, getPublicSettings, listTemplates, listTemplateTags, getTemplate, getCurrentUser, listUsers, createUser, updateSelf, disableMfa, setupMfa, verifyAndEnableMfa, deleteUser, updateUser, restoreUser, assignRole, removeRole, getVisitorSessions2, deleteSessionReplay, getSessionReplay, updateSessionDuration, getSessionReplayEvents, addEvents, deleteScan, getScan, getScanVulnerabilities, listEventTypes, triggerWeeklyDigest, listExternalPlugins, reloadPlugins, ingestSentryEnvelope, ingestSentryEvent, listAuditLogs, getAuditLog } from '../sdk.gen'; import { queryOptions, type UseMutationOptions, type DefaultError, infiniteQueryOptions, type InfiniteData } from '@tanstack/react-query'; -import type { GetPlatformInfoData, ChunkUploadOptionsData, CreateReleaseData, CreateReleaseResponse, ListReleaseFilesData, UploadReleaseFileData, UploadReleaseFileResponse, RecordEventMetricsData, AddSessionReplayEventsData, AddSessionReplayEventsError, AddSessionReplayEventsResponse, InitSessionReplayData, InitSessionReplayError, InitSessionReplayResponse, RecordSpeedMetricsData, RecordSpeedMetricsError, UpdateSpeedMetricsData, UpdateSpeedMetricsError, GetPricingData, ListProviderKeysData, CreateProviderKeyData, CreateProviderKeyError, CreateProviderKeyResponse, TestProviderKeyInlineData, TestProviderKeyInlineError, TestProviderKeyInlineResponse, DeleteProviderKeyData, DeleteProviderKeyError, DeleteProviderKeyResponse, UpdateProviderKeyData, UpdateProviderKeyError, UpdateProviderKeyResponse, TestProviderKeyByIdData, TestProviderKeyByIdError, TestProviderKeyByIdResponse, GetUsageByProviderData, GetConversationsData, GetConversationDetailData, GetUsageRecentData, GetUsageSummaryData, GetUsageTimeseriesData, GetUsageTopModelsData, ChatCompletionsData, ChatCompletionsError, ChatCompletionsResponse, EmbeddingsData, EmbeddingsError, EmbeddingsResponse, ListModelsData, GetActiveVisitorsData, GetEventDetailData, GetEventVisitorsData, GetEventVisitorsResponse, GetEventsCountData, GetGeneralStatsData, GetLiveVisitorsListData, GetPageFlowData, GetPageHourlySessionsData, GetPagePathDetailData, GetPagePathVisitorsData, 
GetPagePathVisitorsResponse, GetPagePathsData, GetPagePathsSparklinesData, GetRecentActivityData, GetSessionDetailsData, GetSessionEventsData, GetSessionEventsResponse, GetSessionLogsData, GetSessionLogsResponse, GetVisitorsData, GetVisitorsResponse, GetVisitorByGuidData, GetVisitorByIdData, GetVisitorDetailsData, EnrichVisitorData, EnrichVisitorResponse2 as EnrichVisitorResponse, GetVisitorInfoData, GetVisitorJourneyData, GetVisitorSessionsData, GetVisitorStatsData, ListApiKeysData, ListApiKeysResponse, CreateApiKeyData, CreateApiKeyResponse2 as CreateApiKeyResponse, GetApiKeyPermissionsData, DeleteApiKeyData, DeleteApiKeyResponse, GetApiKeyData, UpdateApiKeyData, UpdateApiKeyResponse, ActivateApiKeyData, ActivateApiKeyResponse, DeactivateApiKeyData, DeactivateApiKeyResponse, EmailStatusData, LoginData, LoginResponse, RequestMagicLinkData, RequestMagicLinkResponse, VerifyMagicLinkData, RequestPasswordResetData, RequestPasswordResetResponse, ResetPasswordData, ResetPasswordResponse, VerifyEmailData, VerifyMfaChallengeData, VerifyMfaChallengeResponse, RunExternalServiceBackupData, RunExternalServiceBackupError, RunExternalServiceBackupResponse, ListS3SourcesData, CreateS3SourceData, CreateS3SourceError, CreateS3SourceResponse, DeleteS3SourceData, DeleteS3SourceError, DeleteS3SourceResponse, GetS3SourceData, UpdateS3SourceData, UpdateS3SourceError, UpdateS3SourceResponse, ListSourceBackupsData, RunBackupForSourceData, RunBackupForSourceError, RunBackupForSourceResponse, ListBackupSchedulesData, CreateBackupScheduleData, CreateBackupScheduleError, CreateBackupScheduleResponse, DeleteBackupScheduleData, DeleteBackupScheduleError, DeleteBackupScheduleResponse, GetBackupScheduleData, ListBackupsForScheduleData, DisableBackupScheduleData, DisableBackupScheduleResponse, EnableBackupScheduleData, EnableBackupScheduleResponse, GetBackupData, BlobDeleteData, BlobDeleteError, BlobDeleteResponse, BlobListData, BlobListError, BlobListResponse, BlobPutData, BlobPutError, BlobPutResponse, BlobCopyData, BlobCopyError, BlobCopyResponse, BlobDisableData, BlobDisableResponse, BlobEnableData, BlobEnableResponse, BlobStatusData, BlobUpdateData, BlobUpdateResponse, BlobDownloadData, GetDashboardProjectsAnalyticsData, GetActivityGraphData, GetScanByDeploymentData, ListProvidersData, CreateProviderData, CreateProviderResponse, DeleteProviderData, DeleteProviderResponse, GetProviderData, UpdateProviderData, UpdateProviderResponse, ListManagedDomainsData, AddManagedDomainData, AddManagedDomainResponse, TestProviderConnectionData, TestProviderConnectionResponse, ListProviderZonesData, RemoveManagedDomainData, RemoveManagedDomainResponse, VerifyManagedDomainData, VerifyManagedDomainResponse, LookupDnsARecordsData, ListDomainsData, ListDomainsResponse2 as ListDomainsResponse, CreateDomainData, CreateDomainResponse, GetDomainByHostData, CancelDomainOrderData, CancelDomainOrderResponse, GetDomainOrderData, CreateOrRecreateOrderData, CreateOrRecreateOrderResponse, FinalizeOrderData, FinalizeOrderResponse, SetupDnsChallengeData, SetupDnsChallengeResponse2 as SetupDnsChallengeResponse, DeleteDomainData, DeleteDomainResponse, GetDomainByIdData, GetChallengeTokenData, GetHttpChallengeDebugData, ProvisionDomainData, ProvisionDomainResponse, RenewDomainData, RenewDomainResponse, CheckDomainStatusData, ListDomains2Data, CreateDomain2Data, CreateDomain2Response, GetDomainByNameData, DeleteDomain2Data, DeleteDomain2Response, GetDomainData, GetDomainDnsRecordsData, SetupDnsData, SetupDnsResponse2 as SetupDnsResponse, 
VerifyDomainData, VerifyDomainResponse, ListProviders2Data, CreateProvider2Data, CreateProvider2Response, DeleteProvider2Data, DeleteProvider2Response, GetProvider2Data, TestProviderData, TestProviderResponse2 as TestProviderResponse, ListEmailsData, ListEmailsResponse, SendEmailData, SendEmailResponse, GetEmailStatsData, ValidateEmailData, ValidateEmailResponse2 as ValidateEmailResponse, GetEmailData, ListServicesData, ListServicesResponse, CreateServiceData, CreateServiceResponse, ListAvailableContainersData, GetServiceBySlugData, ImportExternalServiceData, ImportExternalServiceResponse, ListProjectServicesData, ListProjectServicesResponse, GetProjectServiceEnvironmentVariablesData, GetProvidersMetadataData, GetProviderMetadataData, GetServiceTypesData, GetServiceTypeParametersData, DeleteServiceData, DeleteServiceResponse, GetServiceData, UpdateServiceData, UpdateServiceResponse, GetServicePreviewEnvironmentVariablesMaskedData, GetServicePreviewEnvironmentVariableNamesData, ListServiceProjectsData, ListServiceProjectsResponse, LinkServiceToProjectData, LinkServiceToProjectResponse, UnlinkServiceFromProjectData, UnlinkServiceFromProjectResponse, GetServiceEnvironmentVariablesData, GetServiceEnvironmentVariableData, StartServiceData, StartServiceResponse, StopServiceData, StopServiceResponse, UpgradeServiceData, UpgradeServiceResponse, ListRootContainersData, ListContainersAtPathData, ListEntitiesData, GetEntityInfoData, QueryDataData, QueryDataResponse2 as QueryDataResponse, DownloadObjectData, GetContainerInfoData, CheckExplorerSupportData, GetFileData, GetIpGeolocationData, ListConnectionsData, ListConnectionsResponse, DeleteConnectionData, DeleteConnectionResponse, ActivateConnectionData, DeactivateConnectionData, ListRepositoriesByConnectionData, ListRepositoriesByConnectionResponse, SyncRepositoriesData, SyncRepositoriesResponse, UpdateConnectionTokenData, UpdateConnectionTokenResponse, ValidateConnectionData, ListGitProvidersData, CreateGitProviderData, CreateGitProviderResponse, CreateGithubPatProviderData, CreateGithubPatProviderResponse, CreateGitlabOauthProviderData, CreateGitlabOauthProviderResponse, CreateGitlabPatProviderData, CreateGitlabPatProviderResponse, DeleteProvider3Data, DeleteProvider3Response, GetGitProviderData, ActivateProviderData, HandleGitProviderOauthCallbackData, GetProviderConnectionsData, DeactivateProviderData, CheckProviderDeletionSafetyData, StartGitProviderOauthData, DeleteProviderSafelyData, DeleteProviderSafelyResponse, GetPublicRepositoryData, GetPublicBranchesData, DetectPublicPresetsData, DiscoverWorkloadsData, DiscoverWorkloadsResponse, ExecuteImportData, ExecuteImportResponse2 as ExecuteImportResponse, CreatePlanData, CreatePlanResponse2 as CreatePlanResponse, ListSourcesData, GetImportStatusData, GetIncidentData, UpdateIncidentStatusData, UpdateIncidentStatusResponse, GetIncidentUpdatesData, AdminListNodesData, RegisterNodeData, RegisterNodeResponse2 as RegisterNodeResponse, AdminRemoveNodeData, AdminRemoveNodeResponse, AdminGetNodeData, AdminListNodeContainersData, AdminUndrainNodeData, AdminUndrainNodeResponse, AdminDrainStatusData, AdminDrainNodeData, AdminDrainNodeResponse, NodeHeartbeatData, NodeHeartbeatResponse, ListIpAccessControlData, CreateIpAccessControlData, CreateIpAccessControlError, CreateIpAccessControlResponse, CheckIpBlockedData, DeleteIpAccessControlData, DeleteIpAccessControlError, DeleteIpAccessControlResponse, GetIpAccessControlData, UpdateIpAccessControlData, UpdateIpAccessControlError, UpdateIpAccessControlResponse, 
KvDelData, KvDelResponse, KvDisableData, KvDisableResponse, KvEnableData, KvEnableResponse, KvExpireData, KvExpireResponse, KvGetData, KvGetResponse, KvIncrData, KvIncrResponse, KvKeysData, KvKeysResponse, KvSetData, KvSetResponse, KvStatusData, KvTtlData, KvTtlResponse, KvUpdateData, KvUpdateResponse, ListRoutesData, CreateRouteData, CreateRouteResponse, DeleteRouteData, DeleteRouteResponse, GetRouteData, UpdateRouteData, UpdateRouteResponse, LogoutData, GetLogContextData, SearchLogsData, SearchLogsError, SearchLogsResponse2 as SearchLogsResponse, TailLogsData, DeleteMonitorData, DeleteMonitorResponse, GetMonitorData, GetBucketedStatusData, GetCurrentMonitorStatusData, GetUptimeHistoryData, DeletePreferencesData, DeletePreferencesResponse, GetPreferencesData, UpdatePreferencesData, UpdatePreferencesResponse, ListNotificationProvidersData, ListNotificationProvidersResponse, CreateNotificationProviderData, CreateNotificationProviderResponse, CreateEmailProviderData, CreateEmailProviderResponse, UpdateEmailProviderData, UpdateEmailProviderResponse, CreateSlackProviderData, CreateSlackProviderResponse, UpdateSlackProviderData, UpdateSlackProviderResponse, CreateWebhookProviderData, CreateWebhookProviderResponse, UpdateWebhookProviderData, UpdateWebhookProviderResponse, DeleteProvider4Data, DeleteProvider4Response, GetNotificationProviderData, UpdateProvider2Data, UpdateProvider2Response, TestProvider2Data, TestProvider2Response, ListOrdersData, ListOrdersResponse2 as ListOrdersResponse, QueryGenaiTracesData, QueryGenaiTracesError, QueryGenaiTracesResponse, GetGenaiTraceData, GetHealthData, ListInsightsData, ListInsightsError, ListInsightsResponse, QueryLogsData, QueryLogsError, QueryLogsResponse, ListMetricNamesData, QueryMetricsData, GetPipelineStatsData, GetQuotaData, QueryTraceSummariesData, QueryTraceSummariesError, QueryTraceSummariesResponse, QueryTracesData, QueryTracesError, QueryTracesResponse, GetTraceData, IngestLogsData, IngestLogsError, IngestMetricsData, IngestMetricsError, IngestTracesData, IngestTracesError, IngestLogsByPathData, IngestLogsByPathError, IngestMetricsByPathData, IngestMetricsByPathError, IngestTracesByPathData, IngestTracesByPathError, HasPerformanceMetricsData, GetPerformanceMetricsData, GetMetricsOverTimeData, GetGroupedPageMetricsData, GetAccessInfoData, GetPrivateIpData, GetPublicIpData, ListPresetsData, GeneratePresetDockerfileData, GeneratePresetDockerfileResponse, GetProjectsData, GetProjectsResponse, CreateProjectData, CreateProjectResponse, GetProjectBySlugData, CreateProjectFromTemplateData, CreateProjectFromTemplateResponse2 as CreateProjectFromTemplateResponse, GetProjectStatisticsData, DeleteProjectData, DeleteProjectResponse, GetProjectData, UpdateProjectData, UpdateProjectResponse, GetProjectDeploymentsData, GetProjectDeploymentsResponse, GetLastDeploymentData, TriggerProjectPipelineData, TriggerProjectPipelineResponse, GetActiveVisitors2Data, GetAggregatedBucketsData, UpdateAutomaticDeployData, UpdateAutomaticDeployResponse, ListCustomDomainsForProjectData, CreateCustomDomainData, CreateCustomDomainResponse, DeleteCustomDomainData, DeleteCustomDomainResponse, GetCustomDomainData, UpdateCustomDomainData, UpdateCustomDomainResponse, LinkCustomDomainToCertificateData, LinkCustomDomainToCertificateResponse, UpdateProjectDeploymentConfigData, UpdateProjectDeploymentConfigResponse, GetDeploymentData, CancelDeploymentData, CancelDeploymentResponse, GetDeploymentJobsData, GetDeploymentJobLogsData, TailDeploymentJobLogsData, GetDeploymentOperationsData, 
ExecuteDeploymentOperationData, ExecuteDeploymentOperationResponse, GetDeploymentOperationStatusData, PauseDeploymentData, PauseDeploymentResponse, PromoteDeploymentData, PromoteDeploymentResponse, ResumeDeploymentData, ResumeDeploymentResponse, RollbackToDeploymentData, RollbackToDeploymentResponse, TeardownDeploymentData, TeardownDeploymentResponse, ListDsnsData, CreateDsnData, CreateDsnResponse, GetOrCreateDsnData, GetOrCreateDsnResponse, RegenerateDsnData, RegenerateDsnResponse, RevokeDsnData, RevokeDsnResponse, GetEnvironmentVariablesData, CreateEnvironmentVariableData, CreateEnvironmentVariableResponse, GetEnvironmentVariableValueData, DeleteEnvironmentVariableData, DeleteEnvironmentVariableResponse, UpdateEnvironmentVariableData, UpdateEnvironmentVariableResponse, GetEnvironmentsData, CreateEnvironmentData, CreateEnvironmentResponse, DeleteEnvironmentData, DeleteEnvironmentResponse, GetEnvironmentData, GetEnvironmentCronsData, GetCronByIdData, GetCronExecutionsData, GetCronExecutionsResponse, GetEnvironmentDomainsData, AddEnvironmentDomainData, AddEnvironmentDomainResponse, DeleteEnvironmentDomainData, DeleteEnvironmentDomainResponse, UpdateEnvironmentSettingsData, UpdateEnvironmentSettingsResponse, SleepEnvironmentData, SleepEnvironmentResponse, TeardownEnvironmentData, TeardownEnvironmentResponse, WakeEnvironmentData, WakeEnvironmentResponse, GetContainerLogsData, ListContainersData, GetContainerDetailData, GetContainerLogsByIdData, GetContainerMetricsData, StreamContainerMetricsData, RestartContainerData, RestartContainerResponse, StartContainerData, StartContainerResponse, StopContainerData, StopContainerResponse, DeployFromImageData, DeployFromImageResponse, DeployFromImageUploadData, DeployFromImageUploadResponse, DeployFromStaticData, DeployFromStaticResponse, GetErrorDashboardStatsData, ListErrorGroupsData, ListErrorGroupsResponse, GetErrorGroupData, UpdateErrorGroupData, ListErrorEventsData, ListErrorEventsResponse, GetErrorEventData, GetErrorStatsData, GetErrorTimeSeriesData, GetEventsCount2Data, GetEventTypeBreakdownData, RecordConsoleEventData, GetPropertyBreakdownData, GetPropertyTimelineData, GetEventsTimelineData, GetUniqueEventsData, GetUniqueEventsResponse, ListExternalImagesData, ListExternalImagesResponse, RegisterExternalImageData, RegisterExternalImageResponse, DeleteExternalImageData, DeleteExternalImageResponse, GetExternalImageData, ListFunnelsData, CreateFunnelData, CreateFunnelResponse2 as CreateFunnelResponse, PreviewFunnelMetricsData, PreviewFunnelMetricsResponse, DeleteFunnelData, UpdateFunnelData, GetFunnelMetricsData, UpdateGitSettingsData, UpdateGitSettingsResponse, HasErrorGroupsData, HasAnalyticsEventsData, GetHourlyVisitsData, ListExternalImages2Data, PushExternalImageData, PushExternalImageResponse, GetExternalImage2Data, ListIncidentsData, CreateIncidentData, CreateIncidentResponse, GetBucketedIncidentsData, PurgeProjectLogsData, PurgeProjectLogsError, ListMonitorsData, CreateMonitorData, CreateMonitorResponse, DeleteReleaseSourceMapsData, DeleteReleaseSourceMapsResponse, ListSourceMapsData, UploadSourceMapData, UploadSourceMapResponse, UpdateProjectSettingsData, UpdateProjectSettingsResponse, ListReleasesData, DeleteSourceMapData, DeleteSourceMapResponse, ListStaticBundlesData, ListStaticBundlesResponse, DeleteStaticBundleData, DeleteStaticBundleResponse, GetStaticBundleData, GetStatusOverviewData, GetUniqueCountsData, UploadStaticBundleData, UploadStaticBundleResponse, ListProjectScansData, ListProjectScansError, ListProjectScansResponse, 
TriggerScanData, TriggerScanError, TriggerScanResponse2 as TriggerScanResponse, GetLatestScansPerEnvironmentData, GetLatestScanData, ListWebhooksData, ListWebhooksResponse, CreateWebhookData, CreateWebhookResponse, DeleteWebhookData, DeleteWebhookResponse, GetWebhookData, UpdateWebhookData, UpdateWebhookResponse, ListDeliveriesData, GetDeliveryData, RetryDeliveryData, RetryDeliveryResponse, GetProxyLogsData, GetProxyLogsResponse, GetProxyLogByRequestIdData, GetTimeBucketStatsData, GetTodayStatsData, GetProxyLogByIdData, ListSyncedRepositoriesData, ListSyncedRepositoriesResponse, GetRepositoryByNameData, GetAllRepositoriesByNameData, GetRepositoryPresetByNameData, GetRepositoryBranchesData, GetRepositoryTagsData, GetRepositoryPresetLiveData, GetBranchesByRepositoryIdData, ListCommitsByRepositoryIdData, CheckCommitExistsData, GetTagsByRepositoryIdData, GetProjectSessionReplaysData, GetProjectSessionReplaysError, GetProjectSessionReplaysResponse2 as GetProjectSessionReplaysResponse, GetSessionEvents2Data, GetSettingsData, UpdateSettingsData, UpdateSettingsResponse, RevokeJoinTokenData, RevokeJoinTokenResponse, GenerateJoinTokenData, GenerateJoinTokenResponse2 as GenerateJoinTokenResponse, GetJoinTokenStatusData, ListTemplatesData, ListTemplateTagsData, GetTemplateData, GetCurrentUserData, ListUsersData, CreateUserData, CreateUserResponse, UpdateSelfData, UpdateSelfResponse, DisableMfaData, DisableMfaResponse, SetupMfaData, SetupMfaResponse, VerifyAndEnableMfaData, VerifyAndEnableMfaResponse, DeleteUserData, DeleteUserResponse, UpdateUserData, UpdateUserResponse, RestoreUserData, RestoreUserResponse, AssignRoleData, RemoveRoleData, RemoveRoleResponse, GetVisitorSessions2Data, GetVisitorSessions2Error, GetVisitorSessions2Response, DeleteSessionReplayData, DeleteSessionReplayError, GetSessionReplayData, UpdateSessionDurationData, UpdateSessionDurationError, UpdateSessionDurationResponse2 as UpdateSessionDurationResponse, GetSessionReplayEventsData, AddEventsData, AddEventsError, AddEventsResponse2 as AddEventsResponse, DeleteScanData, DeleteScanError, DeleteScanResponse, GetScanData, GetScanVulnerabilitiesData, GetScanVulnerabilitiesError, GetScanVulnerabilitiesResponse, ListEventTypesData, TriggerWeeklyDigestData, TriggerWeeklyDigestResponse, ListExternalPluginsData, ReloadPluginsData, ReloadPluginsResponse, IngestSentryEnvelopeData, IngestSentryEventData, IngestSentryEventResponse, ListAuditLogsData, ListAuditLogsResponse, GetAuditLogData } from '../types.gen'; +import type { GetPlatformInfoData, ChunkUploadOptionsData, CreateReleaseData, CreateReleaseResponse, ListReleaseFilesData, UploadReleaseFileData, UploadReleaseFileResponse, RecordEventMetricsData, AddSessionReplayEventsData, AddSessionReplayEventsError, AddSessionReplayEventsResponse, InitSessionReplayData, InitSessionReplayError, InitSessionReplayResponse, RecordSpeedMetricsData, RecordSpeedMetricsError, UpdateSpeedMetricsData, UpdateSpeedMetricsError, GetPricingData, ListProviderKeysData, CreateProviderKeyData, CreateProviderKeyError, CreateProviderKeyResponse, TestProviderKeyInlineData, TestProviderKeyInlineError, TestProviderKeyInlineResponse, DeleteProviderKeyData, DeleteProviderKeyError, DeleteProviderKeyResponse, UpdateProviderKeyData, UpdateProviderKeyError, UpdateProviderKeyResponse, TestProviderKeyByIdData, TestProviderKeyByIdError, TestProviderKeyByIdResponse, GetUsageByProviderData, GetConversationsData, GetConversationDetailData, GetUsageRecentData, GetUsageSummaryData, GetUsageTimeseriesData, GetUsageTopModelsData, 
ChatCompletionsData, ChatCompletionsError, ChatCompletionsResponse, EmbeddingsData, EmbeddingsError, EmbeddingsResponse, ListModelsData, GetActiveVisitorsData, GetEventDetailData, GetEventVisitorsData, GetEventVisitorsResponse, GetEventsCountData, GetGeneralStatsData, GetLiveVisitorsListData, GetPageFlowData, GetPageHourlySessionsData, GetPagePathDetailData, GetPagePathVisitorsData, GetPagePathVisitorsResponse, GetPagePathsData, GetPagePathsSparklinesData, GetRecentActivityData, GetSessionDetailsData, GetSessionEventsData, GetSessionEventsResponse, GetSessionLogsData, GetSessionLogsResponse, GetVisitorsData, GetVisitorsResponse, GetVisitorByGuidData, GetVisitorByIdData, GetVisitorDetailsData, EnrichVisitorData, EnrichVisitorResponse2 as EnrichVisitorResponse, GetVisitorInfoData, GetVisitorJourneyData, GetVisitorSessionsData, GetVisitorStatsData, ListApiKeysData, ListApiKeysResponse, CreateApiKeyData, CreateApiKeyResponse2 as CreateApiKeyResponse, GetApiKeyPermissionsData, DeleteApiKeyData, DeleteApiKeyResponse, GetApiKeyData, UpdateApiKeyData, UpdateApiKeyResponse, ActivateApiKeyData, ActivateApiKeyResponse, DeactivateApiKeyData, DeactivateApiKeyResponse, EmailStatusData, LoginData, LoginResponse, RequestMagicLinkData, RequestMagicLinkResponse, VerifyMagicLinkData, RequestPasswordResetData, RequestPasswordResetResponse, ResetPasswordData, ResetPasswordResponse, VerifyEmailData, VerifyMfaChallengeData, VerifyMfaChallengeResponse, RunExternalServiceBackupData, RunExternalServiceBackupError, RunExternalServiceBackupResponse, ListS3SourcesData, CreateS3SourceData, CreateS3SourceError, CreateS3SourceResponse, DeleteS3SourceData, DeleteS3SourceError, DeleteS3SourceResponse, GetS3SourceData, UpdateS3SourceData, UpdateS3SourceError, UpdateS3SourceResponse, ListSourceBackupsData, RunBackupForSourceData, RunBackupForSourceError, RunBackupForSourceResponse, ListBackupSchedulesData, CreateBackupScheduleData, CreateBackupScheduleError, CreateBackupScheduleResponse, DeleteBackupScheduleData, DeleteBackupScheduleError, DeleteBackupScheduleResponse, GetBackupScheduleData, ListBackupsForScheduleData, DisableBackupScheduleData, DisableBackupScheduleResponse, EnableBackupScheduleData, EnableBackupScheduleResponse, GetBackupData, BlobDeleteData, BlobDeleteError, BlobDeleteResponse, BlobListData, BlobListError, BlobListResponse, BlobPutData, BlobPutError, BlobPutResponse, BlobCopyData, BlobCopyError, BlobCopyResponse, BlobDisableData, BlobDisableResponse, BlobEnableData, BlobEnableResponse, BlobStatusData, BlobUpdateData, BlobUpdateResponse, BlobDownloadData, GetDashboardProjectsAnalyticsData, GetActivityGraphData, GetScanByDeploymentData, ListProvidersData, CreateProviderData, CreateProviderResponse, DeleteProviderData, DeleteProviderResponse, GetProviderData, UpdateProviderData, UpdateProviderResponse, ListManagedDomainsData, AddManagedDomainData, AddManagedDomainResponse, TestProviderConnectionData, TestProviderConnectionResponse, ListProviderZonesData, RemoveManagedDomainData, RemoveManagedDomainResponse, VerifyManagedDomainData, VerifyManagedDomainResponse, LookupDnsARecordsData, ListDomainsData, ListDomainsResponse2 as ListDomainsResponse, CreateDomainData, CreateDomainResponse, GetDomainByHostData, CancelDomainOrderData, CancelDomainOrderResponse, GetDomainOrderData, CreateOrRecreateOrderData, CreateOrRecreateOrderResponse, FinalizeOrderData, FinalizeOrderResponse, SetupDnsChallengeData, SetupDnsChallengeResponse2 as SetupDnsChallengeResponse, DeleteDomainData, DeleteDomainResponse, GetDomainByIdData, 
GetChallengeTokenData, GetHttpChallengeDebugData, ProvisionDomainData, ProvisionDomainResponse, RenewDomainData, RenewDomainResponse, CheckDomainStatusData, ListDomains2Data, CreateDomain2Data, CreateDomain2Response, GetDomainByNameData, DeleteDomain2Data, DeleteDomain2Response, GetDomainData, GetDomainDnsRecordsData, SetupDnsData, SetupDnsResponse2 as SetupDnsResponse, VerifyDomainData, VerifyDomainResponse, ListProviders2Data, CreateProvider2Data, CreateProvider2Response, DeleteProvider2Data, DeleteProvider2Response, GetProvider2Data, TestProviderData, TestProviderResponse2 as TestProviderResponse, ListEmailsData, ListEmailsResponse, SendEmailData, SendEmailResponse, GetEmailStatsData, ValidateEmailData, ValidateEmailResponse2 as ValidateEmailResponse, GetEmailData, ListServicesData, ListServicesResponse, CreateServiceData, CreateServiceResponse, ListAvailableContainersData, GetServiceBySlugData, ImportExternalServiceData, ImportExternalServiceResponse, ListProjectServicesData, ListProjectServicesResponse, GetProjectServiceEnvironmentVariablesData, GetProvidersMetadataData, GetProviderMetadataData, GetServiceTypesData, GetServiceTypeParametersData, DeleteServiceData, DeleteServiceResponse, GetServiceData, UpdateServiceData, UpdateServiceResponse, GetServicePreviewEnvironmentVariablesMaskedData, GetServicePreviewEnvironmentVariableNamesData, ListServiceProjectsData, ListServiceProjectsResponse, LinkServiceToProjectData, LinkServiceToProjectResponse, UnlinkServiceFromProjectData, UnlinkServiceFromProjectResponse, GetServiceEnvironmentVariablesData, GetServiceEnvironmentVariableData, StartServiceData, StartServiceResponse, StopServiceData, StopServiceResponse, UpgradeServiceData, UpgradeServiceResponse, ListRootContainersData, ListContainersAtPathData, ListEntitiesData, GetEntityInfoData, QueryDataData, QueryDataResponse2 as QueryDataResponse, DownloadObjectData, GetContainerInfoData, CheckExplorerSupportData, GetFileData, GetIpGeolocationData, ListConnectionsData, ListConnectionsResponse, DeleteConnectionData, DeleteConnectionResponse, ActivateConnectionData, DeactivateConnectionData, ListRepositoriesByConnectionData, ListRepositoriesByConnectionResponse, SyncRepositoriesData, SyncRepositoriesResponse, UpdateConnectionTokenData, UpdateConnectionTokenResponse, ValidateConnectionData, ListGitProvidersData, CreateGitProviderData, CreateGitProviderResponse, CreateGithubPatProviderData, CreateGithubPatProviderResponse, CreateGitlabOauthProviderData, CreateGitlabOauthProviderResponse, CreateGitlabPatProviderData, CreateGitlabPatProviderResponse, DeleteProvider3Data, DeleteProvider3Response, GetGitProviderData, ActivateProviderData, HandleGitProviderOauthCallbackData, GetProviderConnectionsData, DeactivateProviderData, CheckProviderDeletionSafetyData, StartGitProviderOauthData, DeleteProviderSafelyData, DeleteProviderSafelyResponse, GetPublicRepositoryData, GetPublicBranchesData, DetectPublicPresetsData, DiscoverWorkloadsData, DiscoverWorkloadsResponse, ExecuteImportData, ExecuteImportResponse2 as ExecuteImportResponse, CreatePlanData, CreatePlanResponse2 as CreatePlanResponse, ListSourcesData, GetImportStatusData, GetIncidentData, UpdateIncidentStatusData, UpdateIncidentStatusResponse, GetIncidentUpdatesData, AdminListNodesData, RegisterNodeData, RegisterNodeResponse2 as RegisterNodeResponse, AdminRemoveNodeData, AdminRemoveNodeResponse, AdminGetNodeData, AdminListNodeContainersData, AdminUndrainNodeData, AdminUndrainNodeResponse, AdminDrainStatusData, AdminDrainNodeData, AdminDrainNodeResponse, 
NodeHeartbeatData, NodeHeartbeatResponse, GetS3CredentialsData, ListIpAccessControlData, CreateIpAccessControlData, CreateIpAccessControlError, CreateIpAccessControlResponse, CheckIpBlockedData, DeleteIpAccessControlData, DeleteIpAccessControlError, DeleteIpAccessControlResponse, GetIpAccessControlData, UpdateIpAccessControlData, UpdateIpAccessControlError, UpdateIpAccessControlResponse, KvDelData, KvDelResponse, KvDisableData, KvDisableResponse, KvEnableData, KvEnableResponse, KvExpireData, KvExpireResponse, KvGetData, KvGetResponse, KvIncrData, KvIncrResponse, KvKeysData, KvKeysResponse, KvSetData, KvSetResponse, KvStatusData, KvTtlData, KvTtlResponse, KvUpdateData, KvUpdateResponse, ListRoutesData, CreateRouteData, CreateRouteResponse, DeleteRouteData, DeleteRouteResponse, GetRouteData, UpdateRouteData, UpdateRouteResponse, LogoutData, GetLogContextData, SearchLogsData, SearchLogsError, SearchLogsResponse2 as SearchLogsResponse, TailLogsData, DeleteMonitorData, DeleteMonitorResponse, GetMonitorData, GetBucketedStatusData, GetCurrentMonitorStatusData, GetUptimeHistoryData, DeletePreferencesData, DeletePreferencesResponse, GetPreferencesData, UpdatePreferencesData, UpdatePreferencesResponse, ListNotificationProvidersData, ListNotificationProvidersResponse, CreateNotificationProviderData, CreateNotificationProviderResponse, CreateEmailProviderData, CreateEmailProviderResponse, UpdateEmailProviderData, UpdateEmailProviderResponse, CreateSlackProviderData, CreateSlackProviderResponse, UpdateSlackProviderData, UpdateSlackProviderResponse, CreateWebhookProviderData, CreateWebhookProviderResponse, UpdateWebhookProviderData, UpdateWebhookProviderResponse, DeleteProvider4Data, DeleteProvider4Response, GetNotificationProviderData, UpdateProvider2Data, UpdateProvider2Response, TestProvider2Data, TestProvider2Response, ListOrdersData, ListOrdersResponse2 as ListOrdersResponse, QueryGenaiTracesData, QueryGenaiTracesError, QueryGenaiTracesResponse, GetGenaiTraceData, GetHealthData, ListInsightsData, ListInsightsError, ListInsightsResponse, QueryLogsData, QueryLogsError, QueryLogsResponse, ListMetricNamesData, QueryMetricsData, GetPipelineStatsData, GetQuotaData, QueryTraceSummariesData, QueryTraceSummariesError, QueryTraceSummariesResponse, QueryTracesData, QueryTracesError, QueryTracesResponse, GetTraceData, IngestLogsData, IngestLogsError, IngestMetricsData, IngestMetricsError, IngestTracesData, IngestTracesError, IngestLogsByPathData, IngestLogsByPathError, IngestMetricsByPathData, IngestMetricsByPathError, IngestTracesByPathData, IngestTracesByPathError, HasPerformanceMetricsData, GetPerformanceMetricsData, GetMetricsOverTimeData, GetGroupedPageMetricsData, GetAccessInfoData, GetPrivateIpData, GetPublicIpData, ListPresetsData, GeneratePresetDockerfileData, GeneratePresetDockerfileResponse, GetProjectsData, GetProjectsResponse, CreateProjectData, CreateProjectResponse, GetProjectBySlugData, CreateProjectFromTemplateData, CreateProjectFromTemplateResponse2 as CreateProjectFromTemplateResponse, GetProjectStatisticsData, DeleteProjectData, DeleteProjectResponse, GetProjectData, UpdateProjectData, UpdateProjectResponse, GetProjectDeploymentsData, GetProjectDeploymentsResponse, GetLastDeploymentData, TriggerProjectPipelineData, TriggerProjectPipelineResponse, GetActiveVisitors2Data, GetAggregatedBucketsData, UpdateAutomaticDeployData, UpdateAutomaticDeployResponse, ListCustomDomainsForProjectData, CreateCustomDomainData, CreateCustomDomainResponse, DeleteCustomDomainData, DeleteCustomDomainResponse, 
GetCustomDomainData, UpdateCustomDomainData, UpdateCustomDomainResponse, LinkCustomDomainToCertificateData, LinkCustomDomainToCertificateResponse, UpdateProjectDeploymentConfigData, UpdateProjectDeploymentConfigResponse, GetDeploymentData, CancelDeploymentData, CancelDeploymentResponse, GetDeploymentJobsData, GetDeploymentJobLogsData, TailDeploymentJobLogsData, GetDeploymentOperationsData, ExecuteDeploymentOperationData, ExecuteDeploymentOperationResponse, GetDeploymentOperationStatusData, PauseDeploymentData, PauseDeploymentResponse, PromoteDeploymentData, PromoteDeploymentResponse, ResumeDeploymentData, ResumeDeploymentResponse, RollbackToDeploymentData, RollbackToDeploymentResponse, TeardownDeploymentData, TeardownDeploymentResponse, ListDsnsData, CreateDsnData, CreateDsnResponse, GetOrCreateDsnData, GetOrCreateDsnResponse, RegenerateDsnData, RegenerateDsnResponse, RevokeDsnData, RevokeDsnResponse, GetEnvironmentVariablesData, CreateEnvironmentVariableData, CreateEnvironmentVariableResponse, GetEnvironmentVariableValueData, DeleteEnvironmentVariableData, DeleteEnvironmentVariableResponse, UpdateEnvironmentVariableData, UpdateEnvironmentVariableResponse, GetEnvironmentsData, CreateEnvironmentData, CreateEnvironmentResponse, DeleteEnvironmentData, DeleteEnvironmentResponse, GetEnvironmentData, GetEnvironmentCronsData, GetCronByIdData, GetCronExecutionsData, GetCronExecutionsResponse, GetEnvironmentDomainsData, AddEnvironmentDomainData, AddEnvironmentDomainResponse, DeleteEnvironmentDomainData, DeleteEnvironmentDomainResponse, UpdateEnvironmentSettingsData, UpdateEnvironmentSettingsResponse, SleepEnvironmentData, SleepEnvironmentResponse, TeardownEnvironmentData, TeardownEnvironmentResponse, WakeEnvironmentData, WakeEnvironmentResponse, GetContainerLogsData, ListContainersData, GetContainerDetailData, GetContainerLogsByIdData, GetContainerMetricsData, StreamContainerMetricsData, RestartContainerData, RestartContainerResponse, StartContainerData, StartContainerResponse, StopContainerData, StopContainerResponse, DeployFromImageData, DeployFromImageResponse, DeployFromImageUploadData, DeployFromImageUploadResponse, DeployFromStaticData, DeployFromStaticResponse, GetErrorDashboardStatsData, ListErrorGroupsData, ListErrorGroupsResponse, GetErrorGroupData, UpdateErrorGroupData, ListErrorEventsData, ListErrorEventsResponse, GetErrorEventData, GetErrorStatsData, GetErrorTimeSeriesData, GetEventsCount2Data, GetEventTypeBreakdownData, RecordConsoleEventData, GetPropertyBreakdownData, GetPropertyTimelineData, GetEventsTimelineData, GetUniqueEventsData, GetUniqueEventsResponse, ListExternalImagesData, ListExternalImagesResponse, RegisterExternalImageData, RegisterExternalImageResponse, DeleteExternalImageData, DeleteExternalImageResponse, GetExternalImageData, ListFunnelsData, CreateFunnelData, CreateFunnelResponse2 as CreateFunnelResponse, PreviewFunnelMetricsData, PreviewFunnelMetricsResponse, DeleteFunnelData, UpdateFunnelData, GetFunnelMetricsData, UpdateGitSettingsData, UpdateGitSettingsResponse, HasErrorGroupsData, HasAnalyticsEventsData, GetHourlyVisitsData, ListExternalImages2Data, PushExternalImageData, PushExternalImageResponse, GetExternalImage2Data, ListIncidentsData, CreateIncidentData, CreateIncidentResponse, GetBucketedIncidentsData, PurgeProjectLogsData, PurgeProjectLogsError, ListMonitorsData, CreateMonitorData, CreateMonitorResponse, DeleteReleaseSourceMapsData, DeleteReleaseSourceMapsResponse, ListSourceMapsData, UploadSourceMapData, UploadSourceMapResponse, 
UpdateProjectSettingsData, UpdateProjectSettingsResponse, ListReleasesData, DeleteSourceMapData, DeleteSourceMapResponse, ListStaticBundlesData, ListStaticBundlesResponse, DeleteStaticBundleData, DeleteStaticBundleResponse, GetStaticBundleData, GetStatusOverviewData, GetUniqueCountsData, UploadStaticBundleData, UploadStaticBundleResponse, ListProjectScansData, ListProjectScansError, ListProjectScansResponse, TriggerScanData, TriggerScanError, TriggerScanResponse2 as TriggerScanResponse, GetLatestScansPerEnvironmentData, GetLatestScanData, ListWebhooksData, ListWebhooksResponse, CreateWebhookData, CreateWebhookResponse, DeleteWebhookData, DeleteWebhookResponse, GetWebhookData, UpdateWebhookData, UpdateWebhookResponse, ListDeliveriesData, GetDeliveryData, RetryDeliveryData, RetryDeliveryResponse, GetProxyLogsData, GetProxyLogsResponse, GetProxyLogByRequestIdData, GetTimeBucketStatsData, GetTodayStatsData, GetProxyLogByIdData, ListSyncedRepositoriesData, ListSyncedRepositoriesResponse, GetRepositoryByNameData, GetAllRepositoriesByNameData, GetRepositoryPresetByNameData, GetRepositoryBranchesData, GetRepositoryTagsData, GetRepositoryPresetLiveData, GetBranchesByRepositoryIdData, ListCommitsByRepositoryIdData, CheckCommitExistsData, GetTagsByRepositoryIdData, GetProjectSessionReplaysData, GetProjectSessionReplaysError, GetProjectSessionReplaysResponse2 as GetProjectSessionReplaysResponse, GetSessionEvents2Data, GetSettingsData, UpdateSettingsData, UpdateSettingsResponse, RevokeJoinTokenData, RevokeJoinTokenResponse, GenerateJoinTokenData, GenerateJoinTokenResponse2 as GenerateJoinTokenResponse, GetJoinTokenStatusData, GetPublicSettingsData, ListTemplatesData, ListTemplateTagsData, GetTemplateData, GetCurrentUserData, ListUsersData, CreateUserData, CreateUserResponse, UpdateSelfData, UpdateSelfResponse, DisableMfaData, DisableMfaResponse, SetupMfaData, SetupMfaResponse, VerifyAndEnableMfaData, VerifyAndEnableMfaResponse, DeleteUserData, DeleteUserResponse, UpdateUserData, UpdateUserResponse, RestoreUserData, RestoreUserResponse, AssignRoleData, RemoveRoleData, RemoveRoleResponse, GetVisitorSessions2Data, GetVisitorSessions2Error, GetVisitorSessions2Response, DeleteSessionReplayData, DeleteSessionReplayError, GetSessionReplayData, UpdateSessionDurationData, UpdateSessionDurationError, UpdateSessionDurationResponse2 as UpdateSessionDurationResponse, GetSessionReplayEventsData, AddEventsData, AddEventsError, AddEventsResponse2 as AddEventsResponse, DeleteScanData, DeleteScanError, DeleteScanResponse, GetScanData, GetScanVulnerabilitiesData, GetScanVulnerabilitiesError, GetScanVulnerabilitiesResponse, ListEventTypesData, TriggerWeeklyDigestData, TriggerWeeklyDigestResponse, ListExternalPluginsData, ReloadPluginsData, ReloadPluginsResponse, IngestSentryEnvelopeData, IngestSentryEventData, IngestSentryEventResponse, ListAuditLogsData, ListAuditLogsResponse, GetAuditLogData } from '../types.gen'; import { client } from '../client.gen'; export type QueryKey = [ @@ -4451,6 +4451,29 @@ export const nodeHeartbeatMutation = (options?: Partial) => createQueryKey('getS3Credentials', options); + +/** + * Get decrypted S3 credentials for a backup/restore operation. + * Agents call this endpoint to receive the S3 credentials they need to upload + * or download backups. The credentials are decrypted from the stored S3 source + * and returned over the authenticated TLS/WireGuard channel. 
+ */
+export const getS3CredentialsOptions = (options: Options) => {
+    return queryOptions({
+        queryFn: async ({ queryKey, signal }) => {
+            const { data } = await getS3Credentials({
+                ...options,
+                ...queryKey[0],
+                signal,
+                throwOnError: true
+            });
+            return data;
+        },
+        queryKey: getS3CredentialsQueryKey(options)
+    });
+};
+
 export const listIpAccessControlQueryKey = (options?: Options) => createQueryKey('listIpAccessControl', options);
 /**
@@ -7115,8 +7138,8 @@ export const updateEnvironmentSettingsMutation = (options?: Partial>): UseMutationOptions> => {
     const mutationOptions: UseMutationOptions> = {
@@ -7152,8 +7175,9 @@ export const teardownEnvironmentMutation = (options?: Partial>): UseMutationOptions> => {
     const mutationOptions: UseMutationOptions> = {
@@ -9343,6 +9367,28 @@ export const getJoinTokenStatusOptions = (options?: Options) => createQueryKey('getPublicSettings', options);
+
+/**
+ * Get public settings (no authentication required)
+ * Returns non-sensitive feature flags like demo mode status.
+ * This endpoint is intentionally unauthenticated so the login page can use it.
+ */
+export const getPublicSettingsOptions = (options?: Options) => {
+    return queryOptions({
+        queryFn: async ({ queryKey, signal }) => {
+            const { data } = await getPublicSettings({
+                ...options,
+                ...queryKey[0],
+                signal,
+                throwOnError: true
+            });
+            return data;
+        },
+        queryKey: getPublicSettingsQueryKey(options)
+    });
+};
+
 export const listTemplatesQueryKey = (options?: Options) => createQueryKey('listTemplates', options);
 /**
diff --git a/web/src/api/client/sdk.gen.ts b/web/src/api/client/sdk.gen.ts
index 98f481fc..cdae5079 100644
--- a/web/src/api/client/sdk.gen.ts
+++ b/web/src/api/client/sdk.gen.ts
@@ -1,7 +1,7 @@
 // This file is auto-generated by @hey-api/openapi-ts
 import type { Options as ClientOptions, Client, TDataShape } from './client';
-import type { GetPlatformInfoData, GetPlatformInfoResponses, GetPlatformInfoErrors, ChunkUploadOptionsData, ChunkUploadOptionsResponses, CreateReleaseData, CreateReleaseResponses, CreateReleaseErrors, ListReleaseFilesData, ListReleaseFilesResponses, ListReleaseFilesErrors, UploadReleaseFileData, UploadReleaseFileResponses, UploadReleaseFileErrors, RecordEventMetricsData, RecordEventMetricsResponses, RecordEventMetricsErrors, AddSessionReplayEventsData, AddSessionReplayEventsResponses, AddSessionReplayEventsErrors, InitSessionReplayData, InitSessionReplayResponses, InitSessionReplayErrors, RecordSpeedMetricsData, RecordSpeedMetricsResponses, RecordSpeedMetricsErrors, UpdateSpeedMetricsData, UpdateSpeedMetricsResponses, UpdateSpeedMetricsErrors, GetPricingData, GetPricingResponses, GetPricingErrors, ListProviderKeysData, ListProviderKeysResponses, ListProviderKeysErrors, CreateProviderKeyData, CreateProviderKeyResponses, CreateProviderKeyErrors, TestProviderKeyInlineData, TestProviderKeyInlineResponses, TestProviderKeyInlineErrors, DeleteProviderKeyData, DeleteProviderKeyResponses, DeleteProviderKeyErrors, UpdateProviderKeyData, UpdateProviderKeyResponses, UpdateProviderKeyErrors, TestProviderKeyByIdData, TestProviderKeyByIdResponses, TestProviderKeyByIdErrors, GetUsageByProviderData, GetUsageByProviderResponses, GetUsageByProviderErrors, GetConversationsData, GetConversationsResponses, GetConversationsErrors, GetConversationDetailData, GetConversationDetailResponses, GetConversationDetailErrors, GetUsageRecentData, GetUsageRecentResponses, GetUsageRecentErrors, GetUsageSummaryData, GetUsageSummaryResponses, GetUsageSummaryErrors,
GetUsageTimeseriesData, GetUsageTimeseriesResponses, GetUsageTimeseriesErrors, GetUsageTopModelsData, GetUsageTopModelsResponses, GetUsageTopModelsErrors, ChatCompletionsData, ChatCompletionsResponses, ChatCompletionsErrors, EmbeddingsData, EmbeddingsResponses, EmbeddingsErrors, ListModelsData, ListModelsResponses, ListModelsErrors, GetActiveVisitorsData, GetActiveVisitorsResponses, GetActiveVisitorsErrors, GetEventDetailData, GetEventDetailResponses, GetEventDetailErrors, GetEventVisitorsData, GetEventVisitorsResponses, GetEventVisitorsErrors, GetEventsCountData, GetEventsCountResponses, GetEventsCountErrors, GetGeneralStatsData, GetGeneralStatsResponses, GetGeneralStatsErrors, GetLiveVisitorsListData, GetLiveVisitorsListResponses, GetLiveVisitorsListErrors, GetPageFlowData, GetPageFlowResponses, GetPageFlowErrors, GetPageHourlySessionsData, GetPageHourlySessionsResponses, GetPageHourlySessionsErrors, GetPagePathDetailData, GetPagePathDetailResponses, GetPagePathDetailErrors, GetPagePathVisitorsData, GetPagePathVisitorsResponses, GetPagePathVisitorsErrors, GetPagePathsData, GetPagePathsResponses, GetPagePathsErrors, GetPagePathsSparklinesData, GetPagePathsSparklinesResponses, GetPagePathsSparklinesErrors, GetRecentActivityData, GetRecentActivityResponses, GetRecentActivityErrors, GetSessionDetailsData, GetSessionDetailsResponses, GetSessionDetailsErrors, GetSessionEventsData, GetSessionEventsResponses, GetSessionEventsErrors, GetSessionLogsData, GetSessionLogsResponses, GetSessionLogsErrors, GetVisitorsData, GetVisitorsResponses, GetVisitorsErrors, GetVisitorByGuidData, GetVisitorByGuidResponses, GetVisitorByGuidErrors, GetVisitorByIdData, GetVisitorByIdResponses, GetVisitorByIdErrors, GetVisitorDetailsData, GetVisitorDetailsResponses, GetVisitorDetailsErrors, EnrichVisitorData, EnrichVisitorResponses, EnrichVisitorErrors, GetVisitorInfoData, GetVisitorInfoResponses, GetVisitorInfoErrors, GetVisitorJourneyData, GetVisitorJourneyResponses, GetVisitorJourneyErrors, GetVisitorSessionsData, GetVisitorSessionsResponses, GetVisitorSessionsErrors, GetVisitorStatsData, GetVisitorStatsResponses, GetVisitorStatsErrors, ListApiKeysData, ListApiKeysResponses, ListApiKeysErrors, CreateApiKeyData, CreateApiKeyResponses, CreateApiKeyErrors, GetApiKeyPermissionsData, GetApiKeyPermissionsResponses, GetApiKeyPermissionsErrors, DeleteApiKeyData, DeleteApiKeyResponses, DeleteApiKeyErrors, GetApiKeyData, GetApiKeyResponses, GetApiKeyErrors, UpdateApiKeyData, UpdateApiKeyResponses, UpdateApiKeyErrors, ActivateApiKeyData, ActivateApiKeyResponses, ActivateApiKeyErrors, DeactivateApiKeyData, DeactivateApiKeyResponses, DeactivateApiKeyErrors, EmailStatusData, EmailStatusResponses, EmailStatusErrors, LoginData, LoginResponses, LoginErrors, RequestMagicLinkData, RequestMagicLinkResponses, RequestMagicLinkErrors, VerifyMagicLinkData, VerifyMagicLinkResponses, VerifyMagicLinkErrors, RequestPasswordResetData, RequestPasswordResetResponses, RequestPasswordResetErrors, ResetPasswordData, ResetPasswordResponses, ResetPasswordErrors, VerifyEmailData, VerifyEmailResponses, VerifyEmailErrors, VerifyMfaChallengeData, VerifyMfaChallengeResponses, VerifyMfaChallengeErrors, RunExternalServiceBackupData, RunExternalServiceBackupResponses, RunExternalServiceBackupErrors, ListS3SourcesData, ListS3SourcesResponses, ListS3SourcesErrors, CreateS3SourceData, CreateS3SourceResponses, CreateS3SourceErrors, DeleteS3SourceData, DeleteS3SourceResponses, DeleteS3SourceErrors, GetS3SourceData, GetS3SourceResponses, GetS3SourceErrors, 
UpdateS3SourceData, UpdateS3SourceResponses, UpdateS3SourceErrors, ListSourceBackupsData, ListSourceBackupsResponses, ListSourceBackupsErrors, RunBackupForSourceData, RunBackupForSourceResponses, RunBackupForSourceErrors, ListBackupSchedulesData, ListBackupSchedulesResponses, ListBackupSchedulesErrors, CreateBackupScheduleData, CreateBackupScheduleResponses, CreateBackupScheduleErrors, DeleteBackupScheduleData, DeleteBackupScheduleResponses, DeleteBackupScheduleErrors, GetBackupScheduleData, GetBackupScheduleResponses, GetBackupScheduleErrors, ListBackupsForScheduleData, ListBackupsForScheduleResponses, ListBackupsForScheduleErrors, DisableBackupScheduleData, DisableBackupScheduleResponses, DisableBackupScheduleErrors, EnableBackupScheduleData, EnableBackupScheduleResponses, EnableBackupScheduleErrors, GetBackupData, GetBackupResponses, GetBackupErrors, BlobDeleteData, BlobDeleteResponses, BlobDeleteErrors, BlobListData, BlobListResponses, BlobListErrors, BlobPutData, BlobPutResponses, BlobPutErrors, BlobCopyData, BlobCopyResponses, BlobCopyErrors, BlobDisableData, BlobDisableResponses, BlobDisableErrors, BlobEnableData, BlobEnableResponses, BlobEnableErrors, BlobStatusData, BlobStatusResponses, BlobStatusErrors, BlobUpdateData, BlobUpdateResponses, BlobUpdateErrors, BlobDownloadData, BlobDownloadResponses, BlobDownloadErrors, BlobHeadData, BlobHeadResponses, BlobHeadErrors, GetDashboardProjectsAnalyticsData, GetDashboardProjectsAnalyticsResponses, GetDashboardProjectsAnalyticsErrors, GetActivityGraphData, GetActivityGraphResponses, GetActivityGraphErrors, GetScanByDeploymentData, GetScanByDeploymentResponses, GetScanByDeploymentErrors, ListProvidersData, ListProvidersResponses, ListProvidersErrors, CreateProviderData, CreateProviderResponses, CreateProviderErrors, DeleteProviderData, DeleteProviderResponses, DeleteProviderErrors, GetProviderData, GetProviderResponses, GetProviderErrors, UpdateProviderData, UpdateProviderResponses, UpdateProviderErrors, ListManagedDomainsData, ListManagedDomainsResponses, ListManagedDomainsErrors, AddManagedDomainData, AddManagedDomainResponses, AddManagedDomainErrors, TestProviderConnectionData, TestProviderConnectionResponses, TestProviderConnectionErrors, ListProviderZonesData, ListProviderZonesResponses, ListProviderZonesErrors, RemoveManagedDomainData, RemoveManagedDomainResponses, RemoveManagedDomainErrors, VerifyManagedDomainData, VerifyManagedDomainResponses, VerifyManagedDomainErrors, LookupDnsARecordsData, LookupDnsARecordsResponses, LookupDnsARecordsErrors, ListDomainsData, ListDomainsResponses, ListDomainsErrors, CreateDomainData, CreateDomainResponses, CreateDomainErrors, GetDomainByHostData, GetDomainByHostResponses, GetDomainByHostErrors, CancelDomainOrderData, CancelDomainOrderResponses, CancelDomainOrderErrors, GetDomainOrderData, GetDomainOrderResponses, GetDomainOrderErrors, CreateOrRecreateOrderData, CreateOrRecreateOrderResponses, CreateOrRecreateOrderErrors, FinalizeOrderData, FinalizeOrderResponses, FinalizeOrderErrors, SetupDnsChallengeData, SetupDnsChallengeResponses, SetupDnsChallengeErrors, DeleteDomainData, DeleteDomainResponses, DeleteDomainErrors, GetDomainByIdData, GetDomainByIdResponses, GetDomainByIdErrors, GetChallengeTokenData, GetChallengeTokenResponses, GetChallengeTokenErrors, GetHttpChallengeDebugData, GetHttpChallengeDebugResponses, GetHttpChallengeDebugErrors, ProvisionDomainData, ProvisionDomainResponses, ProvisionDomainErrors, RenewDomainData, RenewDomainResponses, RenewDomainErrors, CheckDomainStatusData, 
CheckDomainStatusResponses, CheckDomainStatusErrors, ListDomains2Data, ListDomains2Responses, ListDomains2Errors, CreateDomain2Data, CreateDomain2Responses, CreateDomain2Errors, GetDomainByNameData, GetDomainByNameResponses, GetDomainByNameErrors, DeleteDomain2Data, DeleteDomain2Responses, DeleteDomain2Errors, GetDomainData, GetDomainResponses, GetDomainErrors, GetDomainDnsRecordsData, GetDomainDnsRecordsResponses, GetDomainDnsRecordsErrors, SetupDnsData, SetupDnsResponses, SetupDnsErrors, VerifyDomainData, VerifyDomainResponses, VerifyDomainErrors, ListProviders2Data, ListProviders2Responses, ListProviders2Errors, CreateProvider2Data, CreateProvider2Responses, CreateProvider2Errors, DeleteProvider2Data, DeleteProvider2Responses, DeleteProvider2Errors, GetProvider2Data, GetProvider2Responses, GetProvider2Errors, TestProviderData, TestProviderResponses, TestProviderErrors, ListEmailsData, ListEmailsResponses, ListEmailsErrors, SendEmailData, SendEmailResponses, SendEmailErrors, GetEmailStatsData, GetEmailStatsResponses, GetEmailStatsErrors, ValidateEmailData, ValidateEmailResponses, ValidateEmailErrors, GetEmailData, GetEmailResponses, GetEmailErrors, ListServicesData, ListServicesResponses, ListServicesErrors, CreateServiceData, CreateServiceResponses, CreateServiceErrors, ListAvailableContainersData, ListAvailableContainersResponses, ListAvailableContainersErrors, GetServiceBySlugData, GetServiceBySlugResponses, GetServiceBySlugErrors, ImportExternalServiceData, ImportExternalServiceResponses, ImportExternalServiceErrors, ListProjectServicesData, ListProjectServicesResponses, ListProjectServicesErrors, GetProjectServiceEnvironmentVariablesData, GetProjectServiceEnvironmentVariablesResponses, GetProjectServiceEnvironmentVariablesErrors, GetProvidersMetadataData, GetProvidersMetadataResponses, GetProvidersMetadataErrors, GetProviderMetadataData, GetProviderMetadataResponses, GetProviderMetadataErrors, GetServiceTypesData, GetServiceTypesResponses, GetServiceTypesErrors, GetServiceTypeParametersData, GetServiceTypeParametersResponses, GetServiceTypeParametersErrors, DeleteServiceData, DeleteServiceResponses, DeleteServiceErrors, GetServiceData, GetServiceResponses, GetServiceErrors, UpdateServiceData, UpdateServiceResponses, UpdateServiceErrors, GetServicePreviewEnvironmentVariablesMaskedData, GetServicePreviewEnvironmentVariablesMaskedResponses, GetServicePreviewEnvironmentVariablesMaskedErrors, GetServicePreviewEnvironmentVariableNamesData, GetServicePreviewEnvironmentVariableNamesResponses, GetServicePreviewEnvironmentVariableNamesErrors, ListServiceProjectsData, ListServiceProjectsResponses, ListServiceProjectsErrors, LinkServiceToProjectData, LinkServiceToProjectResponses, LinkServiceToProjectErrors, UnlinkServiceFromProjectData, UnlinkServiceFromProjectResponses, UnlinkServiceFromProjectErrors, GetServiceEnvironmentVariablesData, GetServiceEnvironmentVariablesResponses, GetServiceEnvironmentVariablesErrors, GetServiceEnvironmentVariableData, GetServiceEnvironmentVariableResponses, GetServiceEnvironmentVariableErrors, StartServiceData, StartServiceResponses, StartServiceErrors, StopServiceData, StopServiceResponses, StopServiceErrors, UpgradeServiceData, UpgradeServiceResponses, UpgradeServiceErrors, ListRootContainersData, ListRootContainersResponses, ListRootContainersErrors, ListContainersAtPathData, ListContainersAtPathResponses, ListContainersAtPathErrors, ListEntitiesData, ListEntitiesResponses, ListEntitiesErrors, GetEntityInfoData, GetEntityInfoResponses, GetEntityInfoErrors, 
QueryDataData, QueryDataResponses, QueryDataErrors, DownloadObjectData, DownloadObjectResponses, DownloadObjectErrors, GetContainerInfoData, GetContainerInfoResponses, GetContainerInfoErrors, CheckExplorerSupportData, CheckExplorerSupportResponses, CheckExplorerSupportErrors, GetFileData, GetFileResponses, GetFileErrors, GetIpGeolocationData, GetIpGeolocationResponses, GetIpGeolocationErrors, ListConnectionsData, ListConnectionsResponses, ListConnectionsErrors, DeleteConnectionData, DeleteConnectionResponses, DeleteConnectionErrors, ActivateConnectionData, ActivateConnectionResponses, ActivateConnectionErrors, DeactivateConnectionData, DeactivateConnectionResponses, DeactivateConnectionErrors, ListRepositoriesByConnectionData, ListRepositoriesByConnectionResponses, ListRepositoriesByConnectionErrors, SyncRepositoriesData, SyncRepositoriesResponses, SyncRepositoriesErrors, UpdateConnectionTokenData, UpdateConnectionTokenResponses, UpdateConnectionTokenErrors, ValidateConnectionData, ValidateConnectionResponses, ValidateConnectionErrors, ListGitProvidersData, ListGitProvidersResponses, ListGitProvidersErrors, CreateGitProviderData, CreateGitProviderResponses, CreateGitProviderErrors, CreateGithubPatProviderData, CreateGithubPatProviderResponses, CreateGithubPatProviderErrors, CreateGitlabOauthProviderData, CreateGitlabOauthProviderResponses, CreateGitlabOauthProviderErrors, CreateGitlabPatProviderData, CreateGitlabPatProviderResponses, CreateGitlabPatProviderErrors, DeleteProvider3Data, DeleteProvider3Responses, DeleteProvider3Errors, GetGitProviderData, GetGitProviderResponses, GetGitProviderErrors, ActivateProviderData, ActivateProviderResponses, ActivateProviderErrors, HandleGitProviderOauthCallbackData, HandleGitProviderOauthCallbackErrors, GetProviderConnectionsData, GetProviderConnectionsResponses, GetProviderConnectionsErrors, DeactivateProviderData, DeactivateProviderResponses, DeactivateProviderErrors, CheckProviderDeletionSafetyData, CheckProviderDeletionSafetyResponses, CheckProviderDeletionSafetyErrors, StartGitProviderOauthData, StartGitProviderOauthErrors, DeleteProviderSafelyData, DeleteProviderSafelyResponses, DeleteProviderSafelyErrors, GetPublicRepositoryData, GetPublicRepositoryResponses, GetPublicRepositoryErrors, GetPublicBranchesData, GetPublicBranchesResponses, GetPublicBranchesErrors, DetectPublicPresetsData, DetectPublicPresetsResponses, DetectPublicPresetsErrors, DiscoverWorkloadsData, DiscoverWorkloadsResponses, DiscoverWorkloadsErrors, ExecuteImportData, ExecuteImportResponses, ExecuteImportErrors, CreatePlanData, CreatePlanResponses, CreatePlanErrors, ListSourcesData, ListSourcesResponses, ListSourcesErrors, GetImportStatusData, GetImportStatusResponses, GetImportStatusErrors, GetIncidentData, GetIncidentResponses, GetIncidentErrors, UpdateIncidentStatusData, UpdateIncidentStatusResponses, UpdateIncidentStatusErrors, GetIncidentUpdatesData, GetIncidentUpdatesResponses, GetIncidentUpdatesErrors, AdminListNodesData, AdminListNodesResponses, AdminListNodesErrors, RegisterNodeData, RegisterNodeResponses, RegisterNodeErrors, AdminRemoveNodeData, AdminRemoveNodeResponses, AdminRemoveNodeErrors, AdminGetNodeData, AdminGetNodeResponses, AdminGetNodeErrors, AdminListNodeContainersData, AdminListNodeContainersResponses, AdminListNodeContainersErrors, AdminUndrainNodeData, AdminUndrainNodeResponses, AdminUndrainNodeErrors, AdminDrainStatusData, AdminDrainStatusResponses, AdminDrainStatusErrors, AdminDrainNodeData, AdminDrainNodeResponses, AdminDrainNodeErrors, 
NodeHeartbeatData, NodeHeartbeatResponses, NodeHeartbeatErrors, ListIpAccessControlData, ListIpAccessControlResponses, ListIpAccessControlErrors, CreateIpAccessControlData, CreateIpAccessControlResponses, CreateIpAccessControlErrors, CheckIpBlockedData, CheckIpBlockedResponses, CheckIpBlockedErrors, DeleteIpAccessControlData, DeleteIpAccessControlResponses, DeleteIpAccessControlErrors, GetIpAccessControlData, GetIpAccessControlResponses, GetIpAccessControlErrors, UpdateIpAccessControlData, UpdateIpAccessControlResponses, UpdateIpAccessControlErrors, KvDelData, KvDelResponses, KvDelErrors, KvDisableData, KvDisableResponses, KvDisableErrors, KvEnableData, KvEnableResponses, KvEnableErrors, KvExpireData, KvExpireResponses, KvExpireErrors, KvGetData, KvGetResponses, KvGetErrors, KvIncrData, KvIncrResponses, KvIncrErrors, KvKeysData, KvKeysResponses, KvKeysErrors, KvSetData, KvSetResponses, KvSetErrors, KvStatusData, KvStatusResponses, KvStatusErrors, KvTtlData, KvTtlResponses, KvTtlErrors, KvUpdateData, KvUpdateResponses, KvUpdateErrors, ListRoutesData, ListRoutesResponses, ListRoutesErrors, CreateRouteData, CreateRouteResponses, CreateRouteErrors, DeleteRouteData, DeleteRouteResponses, DeleteRouteErrors, GetRouteData, GetRouteResponses, GetRouteErrors, UpdateRouteData, UpdateRouteResponses, UpdateRouteErrors, LogoutData, LogoutResponses, LogoutErrors, GetLogContextData, GetLogContextResponses, GetLogContextErrors, SearchLogsData, SearchLogsResponses, SearchLogsErrors, TailLogsData, TailLogsResponses, TailLogsErrors, DeleteMonitorData, DeleteMonitorResponses, DeleteMonitorErrors, GetMonitorData, GetMonitorResponses, GetMonitorErrors, GetBucketedStatusData, GetBucketedStatusResponses, GetBucketedStatusErrors, GetCurrentMonitorStatusData, GetCurrentMonitorStatusResponses, GetCurrentMonitorStatusErrors, GetUptimeHistoryData, GetUptimeHistoryResponses, GetUptimeHistoryErrors, DeletePreferencesData, DeletePreferencesResponses, DeletePreferencesErrors, GetPreferencesData, GetPreferencesResponses, GetPreferencesErrors, UpdatePreferencesData, UpdatePreferencesResponses, UpdatePreferencesErrors, ListNotificationProvidersData, ListNotificationProvidersResponses, ListNotificationProvidersErrors, CreateNotificationProviderData, CreateNotificationProviderResponses, CreateNotificationProviderErrors, CreateEmailProviderData, CreateEmailProviderResponses, CreateEmailProviderErrors, UpdateEmailProviderData, UpdateEmailProviderResponses, UpdateEmailProviderErrors, CreateSlackProviderData, CreateSlackProviderResponses, CreateSlackProviderErrors, UpdateSlackProviderData, UpdateSlackProviderResponses, UpdateSlackProviderErrors, CreateWebhookProviderData, CreateWebhookProviderResponses, CreateWebhookProviderErrors, UpdateWebhookProviderData, UpdateWebhookProviderResponses, UpdateWebhookProviderErrors, DeleteProvider4Data, DeleteProvider4Responses, DeleteProvider4Errors, GetNotificationProviderData, GetNotificationProviderResponses, GetNotificationProviderErrors, UpdateProvider2Data, UpdateProvider2Responses, UpdateProvider2Errors, TestProvider2Data, TestProvider2Responses, TestProvider2Errors, ListOrdersData, ListOrdersResponses, ListOrdersErrors, QueryGenaiTracesData, QueryGenaiTracesResponses, QueryGenaiTracesErrors, GetGenaiTraceData, GetGenaiTraceResponses, GetGenaiTraceErrors, GetHealthData, GetHealthResponses, GetHealthErrors, ListInsightsData, ListInsightsResponses, ListInsightsErrors, QueryLogsData, QueryLogsResponses, QueryLogsErrors, ListMetricNamesData, ListMetricNamesResponses, ListMetricNamesErrors, 
QueryMetricsData, QueryMetricsResponses, QueryMetricsErrors, GetPipelineStatsData, GetPipelineStatsResponses, GetPipelineStatsErrors, GetQuotaData, GetQuotaResponses, GetQuotaErrors, QueryTraceSummariesData, QueryTraceSummariesResponses, QueryTraceSummariesErrors, QueryTracesData, QueryTracesResponses, QueryTracesErrors, GetTraceData, GetTraceResponses, GetTraceErrors, IngestLogsData, IngestLogsResponses, IngestLogsErrors, IngestMetricsData, IngestMetricsResponses, IngestMetricsErrors, IngestTracesData, IngestTracesResponses, IngestTracesErrors, IngestLogsByPathData, IngestLogsByPathResponses, IngestLogsByPathErrors, IngestMetricsByPathData, IngestMetricsByPathResponses, IngestMetricsByPathErrors, IngestTracesByPathData, IngestTracesByPathResponses, IngestTracesByPathErrors, HasPerformanceMetricsData, HasPerformanceMetricsResponses, HasPerformanceMetricsErrors, GetPerformanceMetricsData, GetPerformanceMetricsResponses, GetPerformanceMetricsErrors, GetMetricsOverTimeData, GetMetricsOverTimeResponses, GetMetricsOverTimeErrors, GetGroupedPageMetricsData, GetGroupedPageMetricsResponses, GetGroupedPageMetricsErrors, GetAccessInfoData, GetAccessInfoResponses, GetAccessInfoErrors, GetPrivateIpData, GetPrivateIpResponses, GetPrivateIpErrors, GetPublicIpData, GetPublicIpResponses, GetPublicIpErrors, ListPresetsData, ListPresetsResponses, ListPresetsErrors, GeneratePresetDockerfileData, GeneratePresetDockerfileResponses, GeneratePresetDockerfileErrors, GetProjectsData, GetProjectsResponses, GetProjectsErrors, CreateProjectData, CreateProjectResponses, CreateProjectErrors, GetProjectBySlugData, GetProjectBySlugResponses, GetProjectBySlugErrors, CreateProjectFromTemplateData, CreateProjectFromTemplateResponses, CreateProjectFromTemplateErrors, GetProjectStatisticsData, GetProjectStatisticsResponses, GetProjectStatisticsErrors, DeleteProjectData, DeleteProjectResponses, DeleteProjectErrors, GetProjectData, GetProjectResponses, GetProjectErrors, UpdateProjectData, UpdateProjectResponses, UpdateProjectErrors, GetProjectDeploymentsData, GetProjectDeploymentsResponses, GetProjectDeploymentsErrors, GetLastDeploymentData, GetLastDeploymentResponses, GetLastDeploymentErrors, TriggerProjectPipelineData, TriggerProjectPipelineResponses, TriggerProjectPipelineErrors, GetActiveVisitors2Data, GetActiveVisitors2Responses, GetActiveVisitors2Errors, GetAggregatedBucketsData, GetAggregatedBucketsResponses, GetAggregatedBucketsErrors, UpdateAutomaticDeployData, UpdateAutomaticDeployResponses, UpdateAutomaticDeployErrors, ListCustomDomainsForProjectData, ListCustomDomainsForProjectResponses, ListCustomDomainsForProjectErrors, CreateCustomDomainData, CreateCustomDomainResponses, CreateCustomDomainErrors, DeleteCustomDomainData, DeleteCustomDomainResponses, DeleteCustomDomainErrors, GetCustomDomainData, GetCustomDomainResponses, GetCustomDomainErrors, UpdateCustomDomainData, UpdateCustomDomainResponses, UpdateCustomDomainErrors, LinkCustomDomainToCertificateData, LinkCustomDomainToCertificateResponses, LinkCustomDomainToCertificateErrors, UpdateProjectDeploymentConfigData, UpdateProjectDeploymentConfigResponses, UpdateProjectDeploymentConfigErrors, GetDeploymentData, GetDeploymentResponses, GetDeploymentErrors, CancelDeploymentData, CancelDeploymentResponses, CancelDeploymentErrors, GetDeploymentJobsData, GetDeploymentJobsResponses, GetDeploymentJobsErrors, GetDeploymentJobLogsData, GetDeploymentJobLogsResponses, GetDeploymentJobLogsErrors, TailDeploymentJobLogsData, TailDeploymentJobLogsErrors, 
GetDeploymentOperationsData, GetDeploymentOperationsResponses, GetDeploymentOperationsErrors, ExecuteDeploymentOperationData, ExecuteDeploymentOperationResponses, ExecuteDeploymentOperationErrors, GetDeploymentOperationStatusData, GetDeploymentOperationStatusResponses, GetDeploymentOperationStatusErrors, PauseDeploymentData, PauseDeploymentResponses, PauseDeploymentErrors, PromoteDeploymentData, PromoteDeploymentResponses, PromoteDeploymentErrors, ResumeDeploymentData, ResumeDeploymentResponses, ResumeDeploymentErrors, RollbackToDeploymentData, RollbackToDeploymentResponses, RollbackToDeploymentErrors, TeardownDeploymentData, TeardownDeploymentResponses, TeardownDeploymentErrors, ListDsnsData, ListDsnsResponses, CreateDsnData, CreateDsnResponses, CreateDsnErrors, GetOrCreateDsnData, GetOrCreateDsnResponses, GetOrCreateDsnErrors, RegenerateDsnData, RegenerateDsnResponses, RegenerateDsnErrors, RevokeDsnData, RevokeDsnResponses, RevokeDsnErrors, GetEnvironmentVariablesData, GetEnvironmentVariablesResponses, GetEnvironmentVariablesErrors, CreateEnvironmentVariableData, CreateEnvironmentVariableResponses, CreateEnvironmentVariableErrors, GetEnvironmentVariableValueData, GetEnvironmentVariableValueResponses, GetEnvironmentVariableValueErrors, DeleteEnvironmentVariableData, DeleteEnvironmentVariableResponses, DeleteEnvironmentVariableErrors, UpdateEnvironmentVariableData, UpdateEnvironmentVariableResponses, UpdateEnvironmentVariableErrors, GetEnvironmentsData, GetEnvironmentsResponses, GetEnvironmentsErrors, CreateEnvironmentData, CreateEnvironmentResponses, CreateEnvironmentErrors, DeleteEnvironmentData, DeleteEnvironmentResponses, DeleteEnvironmentErrors, GetEnvironmentData, GetEnvironmentResponses, GetEnvironmentErrors, GetEnvironmentCronsData, GetEnvironmentCronsResponses, GetEnvironmentCronsErrors, GetCronByIdData, GetCronByIdResponses, GetCronByIdErrors, GetCronExecutionsData, GetCronExecutionsResponses, GetCronExecutionsErrors, GetEnvironmentDomainsData, GetEnvironmentDomainsResponses, GetEnvironmentDomainsErrors, AddEnvironmentDomainData, AddEnvironmentDomainResponses, AddEnvironmentDomainErrors, DeleteEnvironmentDomainData, DeleteEnvironmentDomainResponses, DeleteEnvironmentDomainErrors, UpdateEnvironmentSettingsData, UpdateEnvironmentSettingsResponses, UpdateEnvironmentSettingsErrors, SleepEnvironmentData, SleepEnvironmentResponses, SleepEnvironmentErrors, TeardownEnvironmentData, TeardownEnvironmentResponses, TeardownEnvironmentErrors, WakeEnvironmentData, WakeEnvironmentResponses, WakeEnvironmentErrors, GetContainerLogsData, GetContainerLogsErrors, ListContainersData, ListContainersResponses, ListContainersErrors, GetContainerDetailData, GetContainerDetailResponses, GetContainerDetailErrors, GetContainerLogsByIdData, GetContainerLogsByIdErrors, GetContainerMetricsData, GetContainerMetricsResponses, GetContainerMetricsErrors, StreamContainerMetricsData, StreamContainerMetricsResponses, StreamContainerMetricsErrors, RestartContainerData, RestartContainerResponses, RestartContainerErrors, StartContainerData, StartContainerResponses, StartContainerErrors, StopContainerData, StopContainerResponses, StopContainerErrors, DeployFromImageData, DeployFromImageResponses, DeployFromImageErrors, DeployFromImageUploadData, DeployFromImageUploadResponses, DeployFromImageUploadErrors, DeployFromStaticData, DeployFromStaticResponses, DeployFromStaticErrors, GetErrorDashboardStatsData, GetErrorDashboardStatsResponses, GetErrorDashboardStatsErrors, ListErrorGroupsData, ListErrorGroupsResponses, 
ListErrorGroupsErrors, GetErrorGroupData, GetErrorGroupResponses, GetErrorGroupErrors, UpdateErrorGroupData, UpdateErrorGroupResponses, UpdateErrorGroupErrors, ListErrorEventsData, ListErrorEventsResponses, ListErrorEventsErrors, GetErrorEventData, GetErrorEventResponses, GetErrorEventErrors, GetErrorStatsData, GetErrorStatsResponses, GetErrorStatsErrors, GetErrorTimeSeriesData, GetErrorTimeSeriesResponses, GetErrorTimeSeriesErrors, GetEventsCount2Data, GetEventsCount2Responses, GetEventsCount2Errors, GetEventTypeBreakdownData, GetEventTypeBreakdownResponses, GetEventTypeBreakdownErrors, RecordConsoleEventData, RecordConsoleEventResponses, RecordConsoleEventErrors, GetPropertyBreakdownData, GetPropertyBreakdownResponses, GetPropertyBreakdownErrors, GetPropertyTimelineData, GetPropertyTimelineResponses, GetPropertyTimelineErrors, GetEventsTimelineData, GetEventsTimelineResponses, GetEventsTimelineErrors, GetUniqueEventsData, GetUniqueEventsResponses, GetUniqueEventsErrors, ListExternalImagesData, ListExternalImagesResponses, ListExternalImagesErrors, RegisterExternalImageData, RegisterExternalImageResponses, RegisterExternalImageErrors, DeleteExternalImageData, DeleteExternalImageResponses, DeleteExternalImageErrors, GetExternalImageData, GetExternalImageResponses, GetExternalImageErrors, ListFunnelsData, ListFunnelsResponses, ListFunnelsErrors, CreateFunnelData, CreateFunnelResponses, CreateFunnelErrors, PreviewFunnelMetricsData, PreviewFunnelMetricsResponses, PreviewFunnelMetricsErrors, DeleteFunnelData, DeleteFunnelResponses, DeleteFunnelErrors, UpdateFunnelData, UpdateFunnelResponses, UpdateFunnelErrors, GetFunnelMetricsData, GetFunnelMetricsResponses, GetFunnelMetricsErrors, UpdateGitSettingsData, UpdateGitSettingsResponses, UpdateGitSettingsErrors, HasErrorGroupsData, HasErrorGroupsResponses, HasErrorGroupsErrors, HasAnalyticsEventsData, HasAnalyticsEventsResponses, HasAnalyticsEventsErrors, GetHourlyVisitsData, GetHourlyVisitsResponses, GetHourlyVisitsErrors, ListExternalImages2Data, ListExternalImages2Responses, ListExternalImages2Errors, PushExternalImageData, PushExternalImageResponses, PushExternalImageErrors, GetExternalImage2Data, GetExternalImage2Responses, GetExternalImage2Errors, ListIncidentsData, ListIncidentsResponses, ListIncidentsErrors, CreateIncidentData, CreateIncidentResponses, CreateIncidentErrors, GetBucketedIncidentsData, GetBucketedIncidentsResponses, GetBucketedIncidentsErrors, PurgeProjectLogsData, PurgeProjectLogsResponses, PurgeProjectLogsErrors, ListMonitorsData, ListMonitorsResponses, ListMonitorsErrors, CreateMonitorData, CreateMonitorResponses, CreateMonitorErrors, DeleteReleaseSourceMapsData, DeleteReleaseSourceMapsResponses, DeleteReleaseSourceMapsErrors, ListSourceMapsData, ListSourceMapsResponses, ListSourceMapsErrors, UploadSourceMapData, UploadSourceMapResponses, UploadSourceMapErrors, UpdateProjectSettingsData, UpdateProjectSettingsResponses, UpdateProjectSettingsErrors, ListReleasesData, ListReleasesResponses, ListReleasesErrors, DeleteSourceMapData, DeleteSourceMapResponses, DeleteSourceMapErrors, ListStaticBundlesData, ListStaticBundlesResponses, ListStaticBundlesErrors, DeleteStaticBundleData, DeleteStaticBundleResponses, DeleteStaticBundleErrors, GetStaticBundleData, GetStaticBundleResponses, GetStaticBundleErrors, GetStatusOverviewData, GetStatusOverviewResponses, GetStatusOverviewErrors, GetUniqueCountsData, GetUniqueCountsResponses, GetUniqueCountsErrors, UploadStaticBundleData, UploadStaticBundleResponses, UploadStaticBundleErrors, 
ListProjectScansData, ListProjectScansResponses, ListProjectScansErrors, TriggerScanData, TriggerScanResponses, TriggerScanErrors, GetLatestScansPerEnvironmentData, GetLatestScansPerEnvironmentResponses, GetLatestScansPerEnvironmentErrors, GetLatestScanData, GetLatestScanResponses, GetLatestScanErrors, ListWebhooksData, ListWebhooksResponses, ListWebhooksErrors, CreateWebhookData, CreateWebhookResponses, CreateWebhookErrors, DeleteWebhookData, DeleteWebhookResponses, DeleteWebhookErrors, GetWebhookData, GetWebhookResponses, GetWebhookErrors, UpdateWebhookData, UpdateWebhookResponses, UpdateWebhookErrors, ListDeliveriesData, ListDeliveriesResponses, ListDeliveriesErrors, GetDeliveryData, GetDeliveryResponses, GetDeliveryErrors, RetryDeliveryData, RetryDeliveryResponses, RetryDeliveryErrors, GetProxyLogsData, GetProxyLogsResponses, GetProxyLogsErrors, GetProxyLogByRequestIdData, GetProxyLogByRequestIdResponses, GetProxyLogByRequestIdErrors, GetTimeBucketStatsData, GetTimeBucketStatsResponses, GetTimeBucketStatsErrors, GetTodayStatsData, GetTodayStatsResponses, GetTodayStatsErrors, GetProxyLogByIdData, GetProxyLogByIdResponses, GetProxyLogByIdErrors, ListSyncedRepositoriesData, ListSyncedRepositoriesResponses, ListSyncedRepositoriesErrors, GetRepositoryByNameData, GetRepositoryByNameResponses, GetRepositoryByNameErrors, GetAllRepositoriesByNameData, GetAllRepositoriesByNameResponses, GetAllRepositoriesByNameErrors, GetRepositoryPresetByNameData, GetRepositoryPresetByNameResponses, GetRepositoryPresetByNameErrors, GetRepositoryBranchesData, GetRepositoryBranchesResponses, GetRepositoryBranchesErrors, GetRepositoryTagsData, GetRepositoryTagsResponses, GetRepositoryTagsErrors, GetRepositoryPresetLiveData, GetRepositoryPresetLiveResponses, GetRepositoryPresetLiveErrors, GetBranchesByRepositoryIdData, GetBranchesByRepositoryIdResponses, GetBranchesByRepositoryIdErrors, ListCommitsByRepositoryIdData, ListCommitsByRepositoryIdResponses, ListCommitsByRepositoryIdErrors, CheckCommitExistsData, CheckCommitExistsResponses, CheckCommitExistsErrors, GetTagsByRepositoryIdData, GetTagsByRepositoryIdResponses, GetTagsByRepositoryIdErrors, GetProjectSessionReplaysData, GetProjectSessionReplaysResponses, GetProjectSessionReplaysErrors, GetSessionEvents2Data, GetSessionEvents2Responses, GetSessionEvents2Errors, GetSettingsData, GetSettingsResponses, GetSettingsErrors, UpdateSettingsData, UpdateSettingsResponses, UpdateSettingsErrors, RevokeJoinTokenData, RevokeJoinTokenResponses, RevokeJoinTokenErrors, GenerateJoinTokenData, GenerateJoinTokenResponses, GenerateJoinTokenErrors, GetJoinTokenStatusData, GetJoinTokenStatusResponses, GetJoinTokenStatusErrors, ListTemplatesData, ListTemplatesResponses, ListTemplatesErrors, ListTemplateTagsData, ListTemplateTagsResponses, ListTemplateTagsErrors, GetTemplateData, GetTemplateResponses, GetTemplateErrors, GetCurrentUserData, GetCurrentUserResponses, GetCurrentUserErrors, ListUsersData, ListUsersResponses, ListUsersErrors, CreateUserData, CreateUserResponses, CreateUserErrors, UpdateSelfData, UpdateSelfResponses, UpdateSelfErrors, DisableMfaData, DisableMfaResponses, DisableMfaErrors, SetupMfaData, SetupMfaResponses, SetupMfaErrors, VerifyAndEnableMfaData, VerifyAndEnableMfaResponses, VerifyAndEnableMfaErrors, DeleteUserData, DeleteUserResponses, DeleteUserErrors, UpdateUserData, UpdateUserResponses, UpdateUserErrors, RestoreUserData, RestoreUserResponses, RestoreUserErrors, AssignRoleData, AssignRoleResponses, AssignRoleErrors, RemoveRoleData, RemoveRoleResponses, 
RemoveRoleErrors, GetVisitorSessions2Data, GetVisitorSessions2Responses, GetVisitorSessions2Errors, DeleteSessionReplayData, DeleteSessionReplayResponses, DeleteSessionReplayErrors, GetSessionReplayData, GetSessionReplayResponses, GetSessionReplayErrors, UpdateSessionDurationData, UpdateSessionDurationResponses, UpdateSessionDurationErrors, GetSessionReplayEventsData, GetSessionReplayEventsResponses, GetSessionReplayEventsErrors, AddEventsData, AddEventsResponses, AddEventsErrors, DeleteScanData, DeleteScanResponses, DeleteScanErrors, GetScanData, GetScanResponses, GetScanErrors, GetScanVulnerabilitiesData, GetScanVulnerabilitiesResponses, GetScanVulnerabilitiesErrors, ListEventTypesData, ListEventTypesResponses, TriggerWeeklyDigestData, TriggerWeeklyDigestResponses, TriggerWeeklyDigestErrors, ListExternalPluginsData, ListExternalPluginsResponses, ReloadPluginsData, ReloadPluginsResponses, ReloadPluginsErrors, IngestSentryEnvelopeData, IngestSentryEnvelopeResponses, IngestSentryEnvelopeErrors, IngestSentryEventData, IngestSentryEventResponses, IngestSentryEventErrors, ListAuditLogsData, ListAuditLogsResponses, ListAuditLogsErrors, GetAuditLogData, GetAuditLogResponses, GetAuditLogErrors } from './types.gen'; +import type { GetPlatformInfoData, GetPlatformInfoResponses, GetPlatformInfoErrors, ChunkUploadOptionsData, ChunkUploadOptionsResponses, CreateReleaseData, CreateReleaseResponses, CreateReleaseErrors, ListReleaseFilesData, ListReleaseFilesResponses, ListReleaseFilesErrors, UploadReleaseFileData, UploadReleaseFileResponses, UploadReleaseFileErrors, RecordEventMetricsData, RecordEventMetricsResponses, RecordEventMetricsErrors, AddSessionReplayEventsData, AddSessionReplayEventsResponses, AddSessionReplayEventsErrors, InitSessionReplayData, InitSessionReplayResponses, InitSessionReplayErrors, RecordSpeedMetricsData, RecordSpeedMetricsResponses, RecordSpeedMetricsErrors, UpdateSpeedMetricsData, UpdateSpeedMetricsResponses, UpdateSpeedMetricsErrors, GetPricingData, GetPricingResponses, GetPricingErrors, ListProviderKeysData, ListProviderKeysResponses, ListProviderKeysErrors, CreateProviderKeyData, CreateProviderKeyResponses, CreateProviderKeyErrors, TestProviderKeyInlineData, TestProviderKeyInlineResponses, TestProviderKeyInlineErrors, DeleteProviderKeyData, DeleteProviderKeyResponses, DeleteProviderKeyErrors, UpdateProviderKeyData, UpdateProviderKeyResponses, UpdateProviderKeyErrors, TestProviderKeyByIdData, TestProviderKeyByIdResponses, TestProviderKeyByIdErrors, GetUsageByProviderData, GetUsageByProviderResponses, GetUsageByProviderErrors, GetConversationsData, GetConversationsResponses, GetConversationsErrors, GetConversationDetailData, GetConversationDetailResponses, GetConversationDetailErrors, GetUsageRecentData, GetUsageRecentResponses, GetUsageRecentErrors, GetUsageSummaryData, GetUsageSummaryResponses, GetUsageSummaryErrors, GetUsageTimeseriesData, GetUsageTimeseriesResponses, GetUsageTimeseriesErrors, GetUsageTopModelsData, GetUsageTopModelsResponses, GetUsageTopModelsErrors, ChatCompletionsData, ChatCompletionsResponses, ChatCompletionsErrors, EmbeddingsData, EmbeddingsResponses, EmbeddingsErrors, ListModelsData, ListModelsResponses, ListModelsErrors, GetActiveVisitorsData, GetActiveVisitorsResponses, GetActiveVisitorsErrors, GetEventDetailData, GetEventDetailResponses, GetEventDetailErrors, GetEventVisitorsData, GetEventVisitorsResponses, GetEventVisitorsErrors, GetEventsCountData, GetEventsCountResponses, GetEventsCountErrors, GetGeneralStatsData, GetGeneralStatsResponses, 
GetGeneralStatsErrors, GetLiveVisitorsListData, GetLiveVisitorsListResponses, GetLiveVisitorsListErrors, GetPageFlowData, GetPageFlowResponses, GetPageFlowErrors, GetPageHourlySessionsData, GetPageHourlySessionsResponses, GetPageHourlySessionsErrors, GetPagePathDetailData, GetPagePathDetailResponses, GetPagePathDetailErrors, GetPagePathVisitorsData, GetPagePathVisitorsResponses, GetPagePathVisitorsErrors, GetPagePathsData, GetPagePathsResponses, GetPagePathsErrors, GetPagePathsSparklinesData, GetPagePathsSparklinesResponses, GetPagePathsSparklinesErrors, GetRecentActivityData, GetRecentActivityResponses, GetRecentActivityErrors, GetSessionDetailsData, GetSessionDetailsResponses, GetSessionDetailsErrors, GetSessionEventsData, GetSessionEventsResponses, GetSessionEventsErrors, GetSessionLogsData, GetSessionLogsResponses, GetSessionLogsErrors, GetVisitorsData, GetVisitorsResponses, GetVisitorsErrors, GetVisitorByGuidData, GetVisitorByGuidResponses, GetVisitorByGuidErrors, GetVisitorByIdData, GetVisitorByIdResponses, GetVisitorByIdErrors, GetVisitorDetailsData, GetVisitorDetailsResponses, GetVisitorDetailsErrors, EnrichVisitorData, EnrichVisitorResponses, EnrichVisitorErrors, GetVisitorInfoData, GetVisitorInfoResponses, GetVisitorInfoErrors, GetVisitorJourneyData, GetVisitorJourneyResponses, GetVisitorJourneyErrors, GetVisitorSessionsData, GetVisitorSessionsResponses, GetVisitorSessionsErrors, GetVisitorStatsData, GetVisitorStatsResponses, GetVisitorStatsErrors, ListApiKeysData, ListApiKeysResponses, ListApiKeysErrors, CreateApiKeyData, CreateApiKeyResponses, CreateApiKeyErrors, GetApiKeyPermissionsData, GetApiKeyPermissionsResponses, GetApiKeyPermissionsErrors, DeleteApiKeyData, DeleteApiKeyResponses, DeleteApiKeyErrors, GetApiKeyData, GetApiKeyResponses, GetApiKeyErrors, UpdateApiKeyData, UpdateApiKeyResponses, UpdateApiKeyErrors, ActivateApiKeyData, ActivateApiKeyResponses, ActivateApiKeyErrors, DeactivateApiKeyData, DeactivateApiKeyResponses, DeactivateApiKeyErrors, EmailStatusData, EmailStatusResponses, EmailStatusErrors, LoginData, LoginResponses, LoginErrors, RequestMagicLinkData, RequestMagicLinkResponses, RequestMagicLinkErrors, VerifyMagicLinkData, VerifyMagicLinkResponses, VerifyMagicLinkErrors, RequestPasswordResetData, RequestPasswordResetResponses, RequestPasswordResetErrors, ResetPasswordData, ResetPasswordResponses, ResetPasswordErrors, VerifyEmailData, VerifyEmailResponses, VerifyEmailErrors, VerifyMfaChallengeData, VerifyMfaChallengeResponses, VerifyMfaChallengeErrors, RunExternalServiceBackupData, RunExternalServiceBackupResponses, RunExternalServiceBackupErrors, ListS3SourcesData, ListS3SourcesResponses, ListS3SourcesErrors, CreateS3SourceData, CreateS3SourceResponses, CreateS3SourceErrors, DeleteS3SourceData, DeleteS3SourceResponses, DeleteS3SourceErrors, GetS3SourceData, GetS3SourceResponses, GetS3SourceErrors, UpdateS3SourceData, UpdateS3SourceResponses, UpdateS3SourceErrors, ListSourceBackupsData, ListSourceBackupsResponses, ListSourceBackupsErrors, RunBackupForSourceData, RunBackupForSourceResponses, RunBackupForSourceErrors, ListBackupSchedulesData, ListBackupSchedulesResponses, ListBackupSchedulesErrors, CreateBackupScheduleData, CreateBackupScheduleResponses, CreateBackupScheduleErrors, DeleteBackupScheduleData, DeleteBackupScheduleResponses, DeleteBackupScheduleErrors, GetBackupScheduleData, GetBackupScheduleResponses, GetBackupScheduleErrors, ListBackupsForScheduleData, ListBackupsForScheduleResponses, ListBackupsForScheduleErrors, DisableBackupScheduleData, 
DisableBackupScheduleResponses, DisableBackupScheduleErrors, EnableBackupScheduleData, EnableBackupScheduleResponses, EnableBackupScheduleErrors, GetBackupData, GetBackupResponses, GetBackupErrors, BlobDeleteData, BlobDeleteResponses, BlobDeleteErrors, BlobListData, BlobListResponses, BlobListErrors, BlobPutData, BlobPutResponses, BlobPutErrors, BlobCopyData, BlobCopyResponses, BlobCopyErrors, BlobDisableData, BlobDisableResponses, BlobDisableErrors, BlobEnableData, BlobEnableResponses, BlobEnableErrors, BlobStatusData, BlobStatusResponses, BlobStatusErrors, BlobUpdateData, BlobUpdateResponses, BlobUpdateErrors, BlobDownloadData, BlobDownloadResponses, BlobDownloadErrors, BlobHeadData, BlobHeadResponses, BlobHeadErrors, GetDashboardProjectsAnalyticsData, GetDashboardProjectsAnalyticsResponses, GetDashboardProjectsAnalyticsErrors, GetActivityGraphData, GetActivityGraphResponses, GetActivityGraphErrors, GetScanByDeploymentData, GetScanByDeploymentResponses, GetScanByDeploymentErrors, ListProvidersData, ListProvidersResponses, ListProvidersErrors, CreateProviderData, CreateProviderResponses, CreateProviderErrors, DeleteProviderData, DeleteProviderResponses, DeleteProviderErrors, GetProviderData, GetProviderResponses, GetProviderErrors, UpdateProviderData, UpdateProviderResponses, UpdateProviderErrors, ListManagedDomainsData, ListManagedDomainsResponses, ListManagedDomainsErrors, AddManagedDomainData, AddManagedDomainResponses, AddManagedDomainErrors, TestProviderConnectionData, TestProviderConnectionResponses, TestProviderConnectionErrors, ListProviderZonesData, ListProviderZonesResponses, ListProviderZonesErrors, RemoveManagedDomainData, RemoveManagedDomainResponses, RemoveManagedDomainErrors, VerifyManagedDomainData, VerifyManagedDomainResponses, VerifyManagedDomainErrors, LookupDnsARecordsData, LookupDnsARecordsResponses, LookupDnsARecordsErrors, ListDomainsData, ListDomainsResponses, ListDomainsErrors, CreateDomainData, CreateDomainResponses, CreateDomainErrors, GetDomainByHostData, GetDomainByHostResponses, GetDomainByHostErrors, CancelDomainOrderData, CancelDomainOrderResponses, CancelDomainOrderErrors, GetDomainOrderData, GetDomainOrderResponses, GetDomainOrderErrors, CreateOrRecreateOrderData, CreateOrRecreateOrderResponses, CreateOrRecreateOrderErrors, FinalizeOrderData, FinalizeOrderResponses, FinalizeOrderErrors, SetupDnsChallengeData, SetupDnsChallengeResponses, SetupDnsChallengeErrors, DeleteDomainData, DeleteDomainResponses, DeleteDomainErrors, GetDomainByIdData, GetDomainByIdResponses, GetDomainByIdErrors, GetChallengeTokenData, GetChallengeTokenResponses, GetChallengeTokenErrors, GetHttpChallengeDebugData, GetHttpChallengeDebugResponses, GetHttpChallengeDebugErrors, ProvisionDomainData, ProvisionDomainResponses, ProvisionDomainErrors, RenewDomainData, RenewDomainResponses, RenewDomainErrors, CheckDomainStatusData, CheckDomainStatusResponses, CheckDomainStatusErrors, ListDomains2Data, ListDomains2Responses, ListDomains2Errors, CreateDomain2Data, CreateDomain2Responses, CreateDomain2Errors, GetDomainByNameData, GetDomainByNameResponses, GetDomainByNameErrors, DeleteDomain2Data, DeleteDomain2Responses, DeleteDomain2Errors, GetDomainData, GetDomainResponses, GetDomainErrors, GetDomainDnsRecordsData, GetDomainDnsRecordsResponses, GetDomainDnsRecordsErrors, SetupDnsData, SetupDnsResponses, SetupDnsErrors, VerifyDomainData, VerifyDomainResponses, VerifyDomainErrors, ListProviders2Data, ListProviders2Responses, ListProviders2Errors, CreateProvider2Data, CreateProvider2Responses, 
CreateProvider2Errors, DeleteProvider2Data, DeleteProvider2Responses, DeleteProvider2Errors, GetProvider2Data, GetProvider2Responses, GetProvider2Errors, TestProviderData, TestProviderResponses, TestProviderErrors, ListEmailsData, ListEmailsResponses, ListEmailsErrors, SendEmailData, SendEmailResponses, SendEmailErrors, GetEmailStatsData, GetEmailStatsResponses, GetEmailStatsErrors, ValidateEmailData, ValidateEmailResponses, ValidateEmailErrors, GetEmailData, GetEmailResponses, GetEmailErrors, ListServicesData, ListServicesResponses, ListServicesErrors, CreateServiceData, CreateServiceResponses, CreateServiceErrors, ListAvailableContainersData, ListAvailableContainersResponses, ListAvailableContainersErrors, GetServiceBySlugData, GetServiceBySlugResponses, GetServiceBySlugErrors, ImportExternalServiceData, ImportExternalServiceResponses, ImportExternalServiceErrors, ListProjectServicesData, ListProjectServicesResponses, ListProjectServicesErrors, GetProjectServiceEnvironmentVariablesData, GetProjectServiceEnvironmentVariablesResponses, GetProjectServiceEnvironmentVariablesErrors, GetProvidersMetadataData, GetProvidersMetadataResponses, GetProvidersMetadataErrors, GetProviderMetadataData, GetProviderMetadataResponses, GetProviderMetadataErrors, GetServiceTypesData, GetServiceTypesResponses, GetServiceTypesErrors, GetServiceTypeParametersData, GetServiceTypeParametersResponses, GetServiceTypeParametersErrors, DeleteServiceData, DeleteServiceResponses, DeleteServiceErrors, GetServiceData, GetServiceResponses, GetServiceErrors, UpdateServiceData, UpdateServiceResponses, UpdateServiceErrors, GetServicePreviewEnvironmentVariablesMaskedData, GetServicePreviewEnvironmentVariablesMaskedResponses, GetServicePreviewEnvironmentVariablesMaskedErrors, GetServicePreviewEnvironmentVariableNamesData, GetServicePreviewEnvironmentVariableNamesResponses, GetServicePreviewEnvironmentVariableNamesErrors, ListServiceProjectsData, ListServiceProjectsResponses, ListServiceProjectsErrors, LinkServiceToProjectData, LinkServiceToProjectResponses, LinkServiceToProjectErrors, UnlinkServiceFromProjectData, UnlinkServiceFromProjectResponses, UnlinkServiceFromProjectErrors, GetServiceEnvironmentVariablesData, GetServiceEnvironmentVariablesResponses, GetServiceEnvironmentVariablesErrors, GetServiceEnvironmentVariableData, GetServiceEnvironmentVariableResponses, GetServiceEnvironmentVariableErrors, StartServiceData, StartServiceResponses, StartServiceErrors, StopServiceData, StopServiceResponses, StopServiceErrors, UpgradeServiceData, UpgradeServiceResponses, UpgradeServiceErrors, ListRootContainersData, ListRootContainersResponses, ListRootContainersErrors, ListContainersAtPathData, ListContainersAtPathResponses, ListContainersAtPathErrors, ListEntitiesData, ListEntitiesResponses, ListEntitiesErrors, GetEntityInfoData, GetEntityInfoResponses, GetEntityInfoErrors, QueryDataData, QueryDataResponses, QueryDataErrors, DownloadObjectData, DownloadObjectResponses, DownloadObjectErrors, GetContainerInfoData, GetContainerInfoResponses, GetContainerInfoErrors, CheckExplorerSupportData, CheckExplorerSupportResponses, CheckExplorerSupportErrors, GetFileData, GetFileResponses, GetFileErrors, GetIpGeolocationData, GetIpGeolocationResponses, GetIpGeolocationErrors, ListConnectionsData, ListConnectionsResponses, ListConnectionsErrors, DeleteConnectionData, DeleteConnectionResponses, DeleteConnectionErrors, ActivateConnectionData, ActivateConnectionResponses, ActivateConnectionErrors, DeactivateConnectionData, 
DeactivateConnectionResponses, DeactivateConnectionErrors, ListRepositoriesByConnectionData, ListRepositoriesByConnectionResponses, ListRepositoriesByConnectionErrors, SyncRepositoriesData, SyncRepositoriesResponses, SyncRepositoriesErrors, UpdateConnectionTokenData, UpdateConnectionTokenResponses, UpdateConnectionTokenErrors, ValidateConnectionData, ValidateConnectionResponses, ValidateConnectionErrors, ListGitProvidersData, ListGitProvidersResponses, ListGitProvidersErrors, CreateGitProviderData, CreateGitProviderResponses, CreateGitProviderErrors, CreateGithubPatProviderData, CreateGithubPatProviderResponses, CreateGithubPatProviderErrors, CreateGitlabOauthProviderData, CreateGitlabOauthProviderResponses, CreateGitlabOauthProviderErrors, CreateGitlabPatProviderData, CreateGitlabPatProviderResponses, CreateGitlabPatProviderErrors, DeleteProvider3Data, DeleteProvider3Responses, DeleteProvider3Errors, GetGitProviderData, GetGitProviderResponses, GetGitProviderErrors, ActivateProviderData, ActivateProviderResponses, ActivateProviderErrors, HandleGitProviderOauthCallbackData, HandleGitProviderOauthCallbackErrors, GetProviderConnectionsData, GetProviderConnectionsResponses, GetProviderConnectionsErrors, DeactivateProviderData, DeactivateProviderResponses, DeactivateProviderErrors, CheckProviderDeletionSafetyData, CheckProviderDeletionSafetyResponses, CheckProviderDeletionSafetyErrors, StartGitProviderOauthData, StartGitProviderOauthErrors, DeleteProviderSafelyData, DeleteProviderSafelyResponses, DeleteProviderSafelyErrors, GetPublicRepositoryData, GetPublicRepositoryResponses, GetPublicRepositoryErrors, GetPublicBranchesData, GetPublicBranchesResponses, GetPublicBranchesErrors, DetectPublicPresetsData, DetectPublicPresetsResponses, DetectPublicPresetsErrors, DiscoverWorkloadsData, DiscoverWorkloadsResponses, DiscoverWorkloadsErrors, ExecuteImportData, ExecuteImportResponses, ExecuteImportErrors, CreatePlanData, CreatePlanResponses, CreatePlanErrors, ListSourcesData, ListSourcesResponses, ListSourcesErrors, GetImportStatusData, GetImportStatusResponses, GetImportStatusErrors, GetIncidentData, GetIncidentResponses, GetIncidentErrors, UpdateIncidentStatusData, UpdateIncidentStatusResponses, UpdateIncidentStatusErrors, GetIncidentUpdatesData, GetIncidentUpdatesResponses, GetIncidentUpdatesErrors, AdminListNodesData, AdminListNodesResponses, AdminListNodesErrors, RegisterNodeData, RegisterNodeResponses, RegisterNodeErrors, AdminRemoveNodeData, AdminRemoveNodeResponses, AdminRemoveNodeErrors, AdminGetNodeData, AdminGetNodeResponses, AdminGetNodeErrors, AdminListNodeContainersData, AdminListNodeContainersResponses, AdminListNodeContainersErrors, AdminUndrainNodeData, AdminUndrainNodeResponses, AdminUndrainNodeErrors, AdminDrainStatusData, AdminDrainStatusResponses, AdminDrainStatusErrors, AdminDrainNodeData, AdminDrainNodeResponses, AdminDrainNodeErrors, NodeHeartbeatData, NodeHeartbeatResponses, NodeHeartbeatErrors, GetS3CredentialsData, GetS3CredentialsResponses, GetS3CredentialsErrors, ListIpAccessControlData, ListIpAccessControlResponses, ListIpAccessControlErrors, CreateIpAccessControlData, CreateIpAccessControlResponses, CreateIpAccessControlErrors, CheckIpBlockedData, CheckIpBlockedResponses, CheckIpBlockedErrors, DeleteIpAccessControlData, DeleteIpAccessControlResponses, DeleteIpAccessControlErrors, GetIpAccessControlData, GetIpAccessControlResponses, GetIpAccessControlErrors, UpdateIpAccessControlData, UpdateIpAccessControlResponses, UpdateIpAccessControlErrors, KvDelData, KvDelResponses, 
KvDelErrors, KvDisableData, KvDisableResponses, KvDisableErrors, KvEnableData, KvEnableResponses, KvEnableErrors, KvExpireData, KvExpireResponses, KvExpireErrors, KvGetData, KvGetResponses, KvGetErrors, KvIncrData, KvIncrResponses, KvIncrErrors, KvKeysData, KvKeysResponses, KvKeysErrors, KvSetData, KvSetResponses, KvSetErrors, KvStatusData, KvStatusResponses, KvStatusErrors, KvTtlData, KvTtlResponses, KvTtlErrors, KvUpdateData, KvUpdateResponses, KvUpdateErrors, ListRoutesData, ListRoutesResponses, ListRoutesErrors, CreateRouteData, CreateRouteResponses, CreateRouteErrors, DeleteRouteData, DeleteRouteResponses, DeleteRouteErrors, GetRouteData, GetRouteResponses, GetRouteErrors, UpdateRouteData, UpdateRouteResponses, UpdateRouteErrors, LogoutData, LogoutResponses, LogoutErrors, GetLogContextData, GetLogContextResponses, GetLogContextErrors, SearchLogsData, SearchLogsResponses, SearchLogsErrors, TailLogsData, TailLogsResponses, TailLogsErrors, DeleteMonitorData, DeleteMonitorResponses, DeleteMonitorErrors, GetMonitorData, GetMonitorResponses, GetMonitorErrors, GetBucketedStatusData, GetBucketedStatusResponses, GetBucketedStatusErrors, GetCurrentMonitorStatusData, GetCurrentMonitorStatusResponses, GetCurrentMonitorStatusErrors, GetUptimeHistoryData, GetUptimeHistoryResponses, GetUptimeHistoryErrors, DeletePreferencesData, DeletePreferencesResponses, DeletePreferencesErrors, GetPreferencesData, GetPreferencesResponses, GetPreferencesErrors, UpdatePreferencesData, UpdatePreferencesResponses, UpdatePreferencesErrors, ListNotificationProvidersData, ListNotificationProvidersResponses, ListNotificationProvidersErrors, CreateNotificationProviderData, CreateNotificationProviderResponses, CreateNotificationProviderErrors, CreateEmailProviderData, CreateEmailProviderResponses, CreateEmailProviderErrors, UpdateEmailProviderData, UpdateEmailProviderResponses, UpdateEmailProviderErrors, CreateSlackProviderData, CreateSlackProviderResponses, CreateSlackProviderErrors, UpdateSlackProviderData, UpdateSlackProviderResponses, UpdateSlackProviderErrors, CreateWebhookProviderData, CreateWebhookProviderResponses, CreateWebhookProviderErrors, UpdateWebhookProviderData, UpdateWebhookProviderResponses, UpdateWebhookProviderErrors, DeleteProvider4Data, DeleteProvider4Responses, DeleteProvider4Errors, GetNotificationProviderData, GetNotificationProviderResponses, GetNotificationProviderErrors, UpdateProvider2Data, UpdateProvider2Responses, UpdateProvider2Errors, TestProvider2Data, TestProvider2Responses, TestProvider2Errors, ListOrdersData, ListOrdersResponses, ListOrdersErrors, QueryGenaiTracesData, QueryGenaiTracesResponses, QueryGenaiTracesErrors, GetGenaiTraceData, GetGenaiTraceResponses, GetGenaiTraceErrors, GetHealthData, GetHealthResponses, GetHealthErrors, ListInsightsData, ListInsightsResponses, ListInsightsErrors, QueryLogsData, QueryLogsResponses, QueryLogsErrors, ListMetricNamesData, ListMetricNamesResponses, ListMetricNamesErrors, QueryMetricsData, QueryMetricsResponses, QueryMetricsErrors, GetPipelineStatsData, GetPipelineStatsResponses, GetPipelineStatsErrors, GetQuotaData, GetQuotaResponses, GetQuotaErrors, QueryTraceSummariesData, QueryTraceSummariesResponses, QueryTraceSummariesErrors, QueryTracesData, QueryTracesResponses, QueryTracesErrors, GetTraceData, GetTraceResponses, GetTraceErrors, IngestLogsData, IngestLogsResponses, IngestLogsErrors, IngestMetricsData, IngestMetricsResponses, IngestMetricsErrors, IngestTracesData, IngestTracesResponses, IngestTracesErrors, IngestLogsByPathData, 
IngestLogsByPathResponses, IngestLogsByPathErrors, IngestMetricsByPathData, IngestMetricsByPathResponses, IngestMetricsByPathErrors, IngestTracesByPathData, IngestTracesByPathResponses, IngestTracesByPathErrors, HasPerformanceMetricsData, HasPerformanceMetricsResponses, HasPerformanceMetricsErrors, GetPerformanceMetricsData, GetPerformanceMetricsResponses, GetPerformanceMetricsErrors, GetMetricsOverTimeData, GetMetricsOverTimeResponses, GetMetricsOverTimeErrors, GetGroupedPageMetricsData, GetGroupedPageMetricsResponses, GetGroupedPageMetricsErrors, GetAccessInfoData, GetAccessInfoResponses, GetAccessInfoErrors, GetPrivateIpData, GetPrivateIpResponses, GetPrivateIpErrors, GetPublicIpData, GetPublicIpResponses, GetPublicIpErrors, ListPresetsData, ListPresetsResponses, ListPresetsErrors, GeneratePresetDockerfileData, GeneratePresetDockerfileResponses, GeneratePresetDockerfileErrors, GetProjectsData, GetProjectsResponses, GetProjectsErrors, CreateProjectData, CreateProjectResponses, CreateProjectErrors, GetProjectBySlugData, GetProjectBySlugResponses, GetProjectBySlugErrors, CreateProjectFromTemplateData, CreateProjectFromTemplateResponses, CreateProjectFromTemplateErrors, GetProjectStatisticsData, GetProjectStatisticsResponses, GetProjectStatisticsErrors, DeleteProjectData, DeleteProjectResponses, DeleteProjectErrors, GetProjectData, GetProjectResponses, GetProjectErrors, UpdateProjectData, UpdateProjectResponses, UpdateProjectErrors, GetProjectDeploymentsData, GetProjectDeploymentsResponses, GetProjectDeploymentsErrors, GetLastDeploymentData, GetLastDeploymentResponses, GetLastDeploymentErrors, TriggerProjectPipelineData, TriggerProjectPipelineResponses, TriggerProjectPipelineErrors, GetActiveVisitors2Data, GetActiveVisitors2Responses, GetActiveVisitors2Errors, GetAggregatedBucketsData, GetAggregatedBucketsResponses, GetAggregatedBucketsErrors, UpdateAutomaticDeployData, UpdateAutomaticDeployResponses, UpdateAutomaticDeployErrors, ListCustomDomainsForProjectData, ListCustomDomainsForProjectResponses, ListCustomDomainsForProjectErrors, CreateCustomDomainData, CreateCustomDomainResponses, CreateCustomDomainErrors, DeleteCustomDomainData, DeleteCustomDomainResponses, DeleteCustomDomainErrors, GetCustomDomainData, GetCustomDomainResponses, GetCustomDomainErrors, UpdateCustomDomainData, UpdateCustomDomainResponses, UpdateCustomDomainErrors, LinkCustomDomainToCertificateData, LinkCustomDomainToCertificateResponses, LinkCustomDomainToCertificateErrors, UpdateProjectDeploymentConfigData, UpdateProjectDeploymentConfigResponses, UpdateProjectDeploymentConfigErrors, GetDeploymentData, GetDeploymentResponses, GetDeploymentErrors, CancelDeploymentData, CancelDeploymentResponses, CancelDeploymentErrors, GetDeploymentJobsData, GetDeploymentJobsResponses, GetDeploymentJobsErrors, GetDeploymentJobLogsData, GetDeploymentJobLogsResponses, GetDeploymentJobLogsErrors, TailDeploymentJobLogsData, TailDeploymentJobLogsErrors, GetDeploymentOperationsData, GetDeploymentOperationsResponses, GetDeploymentOperationsErrors, ExecuteDeploymentOperationData, ExecuteDeploymentOperationResponses, ExecuteDeploymentOperationErrors, GetDeploymentOperationStatusData, GetDeploymentOperationStatusResponses, GetDeploymentOperationStatusErrors, PauseDeploymentData, PauseDeploymentResponses, PauseDeploymentErrors, PromoteDeploymentData, PromoteDeploymentResponses, PromoteDeploymentErrors, ResumeDeploymentData, ResumeDeploymentResponses, ResumeDeploymentErrors, RollbackToDeploymentData, RollbackToDeploymentResponses, 
RollbackToDeploymentErrors, TeardownDeploymentData, TeardownDeploymentResponses, TeardownDeploymentErrors, ListDsnsData, ListDsnsResponses, CreateDsnData, CreateDsnResponses, CreateDsnErrors, GetOrCreateDsnData, GetOrCreateDsnResponses, GetOrCreateDsnErrors, RegenerateDsnData, RegenerateDsnResponses, RegenerateDsnErrors, RevokeDsnData, RevokeDsnResponses, RevokeDsnErrors, GetEnvironmentVariablesData, GetEnvironmentVariablesResponses, GetEnvironmentVariablesErrors, CreateEnvironmentVariableData, CreateEnvironmentVariableResponses, CreateEnvironmentVariableErrors, GetEnvironmentVariableValueData, GetEnvironmentVariableValueResponses, GetEnvironmentVariableValueErrors, DeleteEnvironmentVariableData, DeleteEnvironmentVariableResponses, DeleteEnvironmentVariableErrors, UpdateEnvironmentVariableData, UpdateEnvironmentVariableResponses, UpdateEnvironmentVariableErrors, GetEnvironmentsData, GetEnvironmentsResponses, GetEnvironmentsErrors, CreateEnvironmentData, CreateEnvironmentResponses, CreateEnvironmentErrors, DeleteEnvironmentData, DeleteEnvironmentResponses, DeleteEnvironmentErrors, GetEnvironmentData, GetEnvironmentResponses, GetEnvironmentErrors, GetEnvironmentCronsData, GetEnvironmentCronsResponses, GetEnvironmentCronsErrors, GetCronByIdData, GetCronByIdResponses, GetCronByIdErrors, GetCronExecutionsData, GetCronExecutionsResponses, GetCronExecutionsErrors, GetEnvironmentDomainsData, GetEnvironmentDomainsResponses, GetEnvironmentDomainsErrors, AddEnvironmentDomainData, AddEnvironmentDomainResponses, AddEnvironmentDomainErrors, DeleteEnvironmentDomainData, DeleteEnvironmentDomainResponses, DeleteEnvironmentDomainErrors, UpdateEnvironmentSettingsData, UpdateEnvironmentSettingsResponses, UpdateEnvironmentSettingsErrors, SleepEnvironmentData, SleepEnvironmentResponses, SleepEnvironmentErrors, TeardownEnvironmentData, TeardownEnvironmentResponses, TeardownEnvironmentErrors, WakeEnvironmentData, WakeEnvironmentResponses, WakeEnvironmentErrors, GetContainerLogsData, GetContainerLogsErrors, ListContainersData, ListContainersResponses, ListContainersErrors, GetContainerDetailData, GetContainerDetailResponses, GetContainerDetailErrors, GetContainerLogsByIdData, GetContainerLogsByIdErrors, GetContainerMetricsData, GetContainerMetricsResponses, GetContainerMetricsErrors, StreamContainerMetricsData, StreamContainerMetricsResponses, StreamContainerMetricsErrors, RestartContainerData, RestartContainerResponses, RestartContainerErrors, StartContainerData, StartContainerResponses, StartContainerErrors, StopContainerData, StopContainerResponses, StopContainerErrors, DeployFromImageData, DeployFromImageResponses, DeployFromImageErrors, DeployFromImageUploadData, DeployFromImageUploadResponses, DeployFromImageUploadErrors, DeployFromStaticData, DeployFromStaticResponses, DeployFromStaticErrors, GetErrorDashboardStatsData, GetErrorDashboardStatsResponses, GetErrorDashboardStatsErrors, ListErrorGroupsData, ListErrorGroupsResponses, ListErrorGroupsErrors, GetErrorGroupData, GetErrorGroupResponses, GetErrorGroupErrors, UpdateErrorGroupData, UpdateErrorGroupResponses, UpdateErrorGroupErrors, ListErrorEventsData, ListErrorEventsResponses, ListErrorEventsErrors, GetErrorEventData, GetErrorEventResponses, GetErrorEventErrors, GetErrorStatsData, GetErrorStatsResponses, GetErrorStatsErrors, GetErrorTimeSeriesData, GetErrorTimeSeriesResponses, GetErrorTimeSeriesErrors, GetEventsCount2Data, GetEventsCount2Responses, GetEventsCount2Errors, GetEventTypeBreakdownData, GetEventTypeBreakdownResponses, 
GetEventTypeBreakdownErrors, RecordConsoleEventData, RecordConsoleEventResponses, RecordConsoleEventErrors, GetPropertyBreakdownData, GetPropertyBreakdownResponses, GetPropertyBreakdownErrors, GetPropertyTimelineData, GetPropertyTimelineResponses, GetPropertyTimelineErrors, GetEventsTimelineData, GetEventsTimelineResponses, GetEventsTimelineErrors, GetUniqueEventsData, GetUniqueEventsResponses, GetUniqueEventsErrors, ListExternalImagesData, ListExternalImagesResponses, ListExternalImagesErrors, RegisterExternalImageData, RegisterExternalImageResponses, RegisterExternalImageErrors, DeleteExternalImageData, DeleteExternalImageResponses, DeleteExternalImageErrors, GetExternalImageData, GetExternalImageResponses, GetExternalImageErrors, ListFunnelsData, ListFunnelsResponses, ListFunnelsErrors, CreateFunnelData, CreateFunnelResponses, CreateFunnelErrors, PreviewFunnelMetricsData, PreviewFunnelMetricsResponses, PreviewFunnelMetricsErrors, DeleteFunnelData, DeleteFunnelResponses, DeleteFunnelErrors, UpdateFunnelData, UpdateFunnelResponses, UpdateFunnelErrors, GetFunnelMetricsData, GetFunnelMetricsResponses, GetFunnelMetricsErrors, UpdateGitSettingsData, UpdateGitSettingsResponses, UpdateGitSettingsErrors, HasErrorGroupsData, HasErrorGroupsResponses, HasErrorGroupsErrors, HasAnalyticsEventsData, HasAnalyticsEventsResponses, HasAnalyticsEventsErrors, GetHourlyVisitsData, GetHourlyVisitsResponses, GetHourlyVisitsErrors, ListExternalImages2Data, ListExternalImages2Responses, ListExternalImages2Errors, PushExternalImageData, PushExternalImageResponses, PushExternalImageErrors, GetExternalImage2Data, GetExternalImage2Responses, GetExternalImage2Errors, ListIncidentsData, ListIncidentsResponses, ListIncidentsErrors, CreateIncidentData, CreateIncidentResponses, CreateIncidentErrors, GetBucketedIncidentsData, GetBucketedIncidentsResponses, GetBucketedIncidentsErrors, PurgeProjectLogsData, PurgeProjectLogsResponses, PurgeProjectLogsErrors, ListMonitorsData, ListMonitorsResponses, ListMonitorsErrors, CreateMonitorData, CreateMonitorResponses, CreateMonitorErrors, DeleteReleaseSourceMapsData, DeleteReleaseSourceMapsResponses, DeleteReleaseSourceMapsErrors, ListSourceMapsData, ListSourceMapsResponses, ListSourceMapsErrors, UploadSourceMapData, UploadSourceMapResponses, UploadSourceMapErrors, UpdateProjectSettingsData, UpdateProjectSettingsResponses, UpdateProjectSettingsErrors, ListReleasesData, ListReleasesResponses, ListReleasesErrors, DeleteSourceMapData, DeleteSourceMapResponses, DeleteSourceMapErrors, ListStaticBundlesData, ListStaticBundlesResponses, ListStaticBundlesErrors, DeleteStaticBundleData, DeleteStaticBundleResponses, DeleteStaticBundleErrors, GetStaticBundleData, GetStaticBundleResponses, GetStaticBundleErrors, GetStatusOverviewData, GetStatusOverviewResponses, GetStatusOverviewErrors, GetUniqueCountsData, GetUniqueCountsResponses, GetUniqueCountsErrors, UploadStaticBundleData, UploadStaticBundleResponses, UploadStaticBundleErrors, ListProjectScansData, ListProjectScansResponses, ListProjectScansErrors, TriggerScanData, TriggerScanResponses, TriggerScanErrors, GetLatestScansPerEnvironmentData, GetLatestScansPerEnvironmentResponses, GetLatestScansPerEnvironmentErrors, GetLatestScanData, GetLatestScanResponses, GetLatestScanErrors, ListWebhooksData, ListWebhooksResponses, ListWebhooksErrors, CreateWebhookData, CreateWebhookResponses, CreateWebhookErrors, DeleteWebhookData, DeleteWebhookResponses, DeleteWebhookErrors, GetWebhookData, GetWebhookResponses, GetWebhookErrors, UpdateWebhookData, 
UpdateWebhookResponses, UpdateWebhookErrors, ListDeliveriesData, ListDeliveriesResponses, ListDeliveriesErrors, GetDeliveryData, GetDeliveryResponses, GetDeliveryErrors, RetryDeliveryData, RetryDeliveryResponses, RetryDeliveryErrors, GetProxyLogsData, GetProxyLogsResponses, GetProxyLogsErrors, GetProxyLogByRequestIdData, GetProxyLogByRequestIdResponses, GetProxyLogByRequestIdErrors, GetTimeBucketStatsData, GetTimeBucketStatsResponses, GetTimeBucketStatsErrors, GetTodayStatsData, GetTodayStatsResponses, GetTodayStatsErrors, GetProxyLogByIdData, GetProxyLogByIdResponses, GetProxyLogByIdErrors, ListSyncedRepositoriesData, ListSyncedRepositoriesResponses, ListSyncedRepositoriesErrors, GetRepositoryByNameData, GetRepositoryByNameResponses, GetRepositoryByNameErrors, GetAllRepositoriesByNameData, GetAllRepositoriesByNameResponses, GetAllRepositoriesByNameErrors, GetRepositoryPresetByNameData, GetRepositoryPresetByNameResponses, GetRepositoryPresetByNameErrors, GetRepositoryBranchesData, GetRepositoryBranchesResponses, GetRepositoryBranchesErrors, GetRepositoryTagsData, GetRepositoryTagsResponses, GetRepositoryTagsErrors, GetRepositoryPresetLiveData, GetRepositoryPresetLiveResponses, GetRepositoryPresetLiveErrors, GetBranchesByRepositoryIdData, GetBranchesByRepositoryIdResponses, GetBranchesByRepositoryIdErrors, ListCommitsByRepositoryIdData, ListCommitsByRepositoryIdResponses, ListCommitsByRepositoryIdErrors, CheckCommitExistsData, CheckCommitExistsResponses, CheckCommitExistsErrors, GetTagsByRepositoryIdData, GetTagsByRepositoryIdResponses, GetTagsByRepositoryIdErrors, GetProjectSessionReplaysData, GetProjectSessionReplaysResponses, GetProjectSessionReplaysErrors, GetSessionEvents2Data, GetSessionEvents2Responses, GetSessionEvents2Errors, GetSettingsData, GetSettingsResponses, GetSettingsErrors, UpdateSettingsData, UpdateSettingsResponses, UpdateSettingsErrors, RevokeJoinTokenData, RevokeJoinTokenResponses, RevokeJoinTokenErrors, GenerateJoinTokenData, GenerateJoinTokenResponses, GenerateJoinTokenErrors, GetJoinTokenStatusData, GetJoinTokenStatusResponses, GetJoinTokenStatusErrors, GetPublicSettingsData, GetPublicSettingsResponses, GetPublicSettingsErrors, ListTemplatesData, ListTemplatesResponses, ListTemplatesErrors, ListTemplateTagsData, ListTemplateTagsResponses, ListTemplateTagsErrors, GetTemplateData, GetTemplateResponses, GetTemplateErrors, GetCurrentUserData, GetCurrentUserResponses, GetCurrentUserErrors, ListUsersData, ListUsersResponses, ListUsersErrors, CreateUserData, CreateUserResponses, CreateUserErrors, UpdateSelfData, UpdateSelfResponses, UpdateSelfErrors, DisableMfaData, DisableMfaResponses, DisableMfaErrors, SetupMfaData, SetupMfaResponses, SetupMfaErrors, VerifyAndEnableMfaData, VerifyAndEnableMfaResponses, VerifyAndEnableMfaErrors, DeleteUserData, DeleteUserResponses, DeleteUserErrors, UpdateUserData, UpdateUserResponses, UpdateUserErrors, RestoreUserData, RestoreUserResponses, RestoreUserErrors, AssignRoleData, AssignRoleResponses, AssignRoleErrors, RemoveRoleData, RemoveRoleResponses, RemoveRoleErrors, GetVisitorSessions2Data, GetVisitorSessions2Responses, GetVisitorSessions2Errors, DeleteSessionReplayData, DeleteSessionReplayResponses, DeleteSessionReplayErrors, GetSessionReplayData, GetSessionReplayResponses, GetSessionReplayErrors, UpdateSessionDurationData, UpdateSessionDurationResponses, UpdateSessionDurationErrors, GetSessionReplayEventsData, GetSessionReplayEventsResponses, GetSessionReplayEventsErrors, AddEventsData, AddEventsResponses, AddEventsErrors, 
DeleteScanData, DeleteScanResponses, DeleteScanErrors, GetScanData, GetScanResponses, GetScanErrors, GetScanVulnerabilitiesData, GetScanVulnerabilitiesResponses, GetScanVulnerabilitiesErrors, ListEventTypesData, ListEventTypesResponses, TriggerWeeklyDigestData, TriggerWeeklyDigestResponses, TriggerWeeklyDigestErrors, ListExternalPluginsData, ListExternalPluginsResponses, ReloadPluginsData, ReloadPluginsResponses, ReloadPluginsErrors, IngestSentryEnvelopeData, IngestSentryEnvelopeResponses, IngestSentryEnvelopeErrors, IngestSentryEventData, IngestSentryEventResponses, IngestSentryEventErrors, ListAuditLogsData, ListAuditLogsResponses, ListAuditLogsErrors, GetAuditLogData, GetAuditLogResponses, GetAuditLogErrors } from './types.gen'; import { client } from './client.gen'; export type Options = ClientOptions & { @@ -3376,6 +3376,19 @@ export const nodeHeartbeat = (options: Opt }); }; +/** + * Get decrypted S3 credentials for a backup/restore operation. + * Agents call this endpoint to receive the S3 credentials they need to upload + * or download backups. The credentials are decrypted from the stored S3 source + * and returned over the authenticated TLS/WireGuard channel. + */ +export const getS3Credentials = (options: Options) => { + return (options.client ?? client).get({ + url: '/internal/nodes/{node_id}/s3-credentials/{s3_source_id}', + ...options + }); +}; + /** * List all IP access control rules */ @@ -5335,8 +5348,8 @@ export const updateEnvironmentSettings = ( /** * Sleep an on-demand environment - * Manually put an on-demand environment to sleep. Sets `sleeping = true`. - * The proxy will stop sending traffic and the idle sweep will stop containers. + * Manually put an on-demand environment to sleep. Stops containers and sets + * `sleeping = true`. If no OnDemandWaker is available, falls back to DB flag only. */ export const sleepEnvironment = (options: Options) => { return (options.client ?? client).post({ @@ -5358,8 +5371,9 @@ export const teardownEnvironment = (option /** * Wake a sleeping on-demand environment * Manually wake an environment that has been put to sleep by the on-demand - * idle timeout. Sets `sleeping = false` on the environment. The proxy will - * detect the state change and start containers on the next request. + * idle timeout. Starts containers, waits for health checks, then sets + * `sleeping = false`. If no OnDemandWaker is available (proxy not running + * in same process), falls back to setting the DB flag only. */ export const wakeEnvironment = (options: Options) => { return (options.client ?? client).post({ @@ -6881,6 +6895,18 @@ export const getJoinTokenStatus = (options }); }; +/** + * Get public settings (no authentication required) + * Returns non-sensitive feature flags like demo mode status. + * This endpoint is intentionally unauthenticated so the login page can use it. + */ +export const getPublicSettings = (options?: Options) => { + return (options?.client ?? client).get({ + url: '/settings/public', + ...options + }); +}; + /** * List all available templates * Returns a list of all public templates, optionally filtered by tag or featured status. diff --git a/web/src/api/client/types.gen.ts b/web/src/api/client/types.gen.ts index 5c9d216e..48b77a03 100644 --- a/web/src/api/client/types.gen.ts +++ b/web/src/api/client/types.gen.ts @@ -703,6 +703,20 @@ export type CliLoginRequest = { username: string; }; +/** + * Request spec for a single cluster member. + */ +export type ClusterMemberRequest = { + /** + * Target worker node ID. 
Omit or null to run on the control plane. + */ + node_id?: number | null; + /** + * Service-type-specific role (e.g., "monitor", "primary", "replica") + */ + role: string; +}; + export type CommitExistsResponse = { commit_sha?: string | null; exists: boolean; @@ -1212,11 +1226,23 @@ export type CreateEnvironmentVariableRequest = { }; export type CreateExternalServiceRequest = { + /** + * Cluster member specifications. Required when topology is "cluster". + */ + members?: Array; name: string; + /** + * Target node ID for the service. Omit or null to run on the control plane. + */ + node_id?: number | null; parameters: { [key: string]: unknown; }; service_type: ServiceTypeRoute; + /** + * Service topology: "standalone" (default) or "cluster" (HA multi-member). + */ + topology?: string; version?: string | null; }; @@ -3727,9 +3753,21 @@ export type ExternalServiceInfo = { connection_info?: string | null; created_at: string; id: number; + /** + * Cluster members (empty for standalone services). + */ + members?: Array; name: string; + /** + * Node ID where the service runs. Null means control plane (local). + */ + node_id?: number | null; service_type: ServiceTypeRoute; status: string; + /** + * Service topology: "standalone" (single container) or "cluster" (HA multi-member). + */ + topology: string; updated_at: string; version?: string | null; }; @@ -6498,6 +6536,24 @@ export type PaginationParams = { per_page?: number; }; +/** + * Password protection configuration + * + * When enabled, the proxy shows an HTML password form before allowing access. + * After the user enters the correct password, an HMAC-signed cookie is set + * so subsequent requests pass through without re-entering the password. + */ +export type PasswordProtectionConfig = { + /** + * Whether password protection is enabled + */ + enabled: boolean; + /** + * The bcrypt-hashed password (never stored or returned in plaintext) + */ + passwordHash: string; +}; + export type PathVisitors = { name: string; percentage: number; @@ -7288,6 +7344,16 @@ export type PublicRepositoryInfo = { stars: number; }; +/** + * Public settings response containing only non-sensitive feature flags + */ +export type PublicSettingsResponse = { + /** + * Whether demo mode is enabled + */ + demo_enabled: boolean; +}; + export type PurgeLogsRequest = { /** * Delete all logs before this timestamp (ISO 8601) @@ -7764,6 +7830,18 @@ export type RunExternalServiceBackupRequest = { s3_source_id: number; }; +/** + * S3 credentials distributed to agents for backup/restore operations. + */ +export type S3CredentialsResponse = { + access_key_id: string; + bucket_name: string; + endpoint?: string | null; + force_path_style: boolean; + region: string; + secret_key: string; +}; + /** * Response type for S3 source */ @@ -7893,6 +7971,7 @@ export type SecurityConfig = { enabled?: boolean | null; geoRestrictions?: null | GeoRestrictionsConfig; headers?: null | SecurityHeadersConfig; + passwordProtection?: null | PasswordProtectionConfig; rateLimiting?: null | RateLimitConfig; }; @@ -8086,6 +8165,20 @@ export type ServiceAccessInfo = { */ export type ServiceAction = 'create' | 'link-external' | 'skip'; +/** + * Public info about a cluster member. 
+ */ +export type ServiceMemberInfo = { + container_name: string; + hostname?: string | null; + id: number; + node_id?: number | null; + ordinal: number; + port?: number | null; + role: string; + status: string; +}; + export type ServiceParameter = { choices?: Array | null; default_value?: string | null; @@ -9522,6 +9615,13 @@ export type UpdateEnvironmentSettingsRequest = { * idle_timeout_seconds of no traffic and started on the next request. */ on_demand?: boolean | null; + /** + * Set a password to protect this environment. The proxy will show an HTML + * password form before allowing access. The password is bcrypt-hashed + * server-side and never stored in plaintext. + * Send an empty string to remove password protection. + */ + password?: string | null; /** * Enable/disable performance metrics collection */ @@ -18486,6 +18586,46 @@ export type NodeHeartbeatResponses = { export type NodeHeartbeatResponse = NodeHeartbeatResponses[keyof NodeHeartbeatResponses]; +export type GetS3CredentialsData = { + body?: never; + path: { + /** + * Node ID + */ + node_id: number; + /** + * S3 source ID + */ + s3_source_id: number; + }; + query?: never; + url: '/internal/nodes/{node_id}/s3-credentials/{s3_source_id}'; +}; + +export type GetS3CredentialsErrors = { + /** + * Unauthorized + */ + 401: unknown; + /** + * S3 source not found + */ + 404: unknown; + /** + * Internal server error + */ + 500: unknown; +}; + +export type GetS3CredentialsResponses = { + /** + * S3 credentials + */ + 200: S3CredentialsResponse; +}; + +export type GetS3CredentialsResponse = GetS3CredentialsResponses[keyof GetS3CredentialsResponses]; + export type ListIpAccessControlData = { body?: never; path?: never; @@ -23380,6 +23520,10 @@ export type SleepEnvironmentErrors = { * Environment not found */ 404: unknown; + /** + * Too many state transitions, retry after cooldown + */ + 429: unknown; /** * Internal server error */ @@ -23456,6 +23600,10 @@ export type WakeEnvironmentErrors = { * Environment not found */ 404: unknown; + /** + * Too many state transitions, retry after cooldown + */ + 429: unknown; /** * Internal server error */ @@ -27781,6 +27929,29 @@ export type GetJoinTokenStatusResponses = { export type GetJoinTokenStatusResponse = GetJoinTokenStatusResponses[keyof GetJoinTokenStatusResponses]; +export type GetPublicSettingsData = { + body?: never; + path?: never; + query?: never; + url: '/settings/public'; +}; + +export type GetPublicSettingsErrors = { + /** + * Internal server error + */ + 500: unknown; +}; + +export type GetPublicSettingsResponses = { + /** + * Public settings + */ + 200: PublicSettingsResponse; +}; + +export type GetPublicSettingsResponse = GetPublicSettingsResponses[keyof GetPublicSettingsResponses]; + export type ListTemplatesData = { body?: never; path?: never; diff --git a/web/src/components/environments/EnvironmentSidebar.tsx b/web/src/components/environments/EnvironmentSidebar.tsx index 2e6ba6f3..6a4f03ec 100644 --- a/web/src/components/environments/EnvironmentSidebar.tsx +++ b/web/src/components/environments/EnvironmentSidebar.tsx @@ -97,9 +97,9 @@ export function EnvironmentSidebar({ - + {item.title} ) diff --git a/web/src/components/forms/JsonSchemaForm.tsx b/web/src/components/forms/JsonSchemaForm.tsx index 06a15b5d..3340fa94 100644 --- a/web/src/components/forms/JsonSchemaForm.tsx +++ b/web/src/components/forms/JsonSchemaForm.tsx @@ -89,6 +89,11 @@ interface JsonSchemaFormProps { * @default [['host', 'port'], ['username', 'password']] */ pairedFields?: [string, string][] + + /** + * 
Fields to hide from the form (they won't be rendered or submitted) + */ + hiddenFields?: string[] } /** @@ -108,11 +113,15 @@ export function JsonSchemaForm({ ['host', 'port'], ['username', 'password'], ], + hiddenFields = [], }: JsonSchemaFormProps) { - // Get list of property names in order + // Get list of property names in order, excluding hidden fields const propertyNames = useMemo( - () => Object.keys(schema.properties), - [schema.properties] + () => + Object.keys(schema.properties).filter( + (name) => !hiddenFields.includes(name) + ), + [schema.properties, hiddenFields] ) // Create Zod schema from JSON Schema @@ -191,6 +200,9 @@ export function JsonSchemaForm({ const cleanedValues: Record = {} Object.entries(values).forEach(([key, value]) => { + // Skip hidden fields + if (hiddenFields.includes(key)) return + const prop = schema.properties[key] const types = Array.isArray(prop.type) ? prop.type : [prop.type] const isNullable = types.includes('null') diff --git a/web/src/components/project/ProjectDetailHeader.tsx b/web/src/components/project/ProjectDetailHeader.tsx index fb0752e8..07b0dd61 100644 --- a/web/src/components/project/ProjectDetailHeader.tsx +++ b/web/src/components/project/ProjectDetailHeader.tsx @@ -32,7 +32,7 @@ export function ProjectDetailHeader({ } return ( -
+
diff --git a/web/src/components/project/settings/environments/EnvironmentConfigurationCard.tsx b/web/src/components/project/settings/environments/EnvironmentConfigurationCard.tsx index 8ba96292..3a260899 100644 --- a/web/src/components/project/settings/environments/EnvironmentConfigurationCard.tsx +++ b/web/src/components/project/settings/environments/EnvironmentConfigurationCard.tsx @@ -26,7 +26,7 @@ import { SelectValue, } from '@/components/ui/select' import { useMutation, useQuery } from '@tanstack/react-query' -import { GitBranch, Loader2, Moon, Network, Plus, Shield, X } from 'lucide-react' +import { GitBranch, KeyRound, Loader2, Moon, Network, Plus, Shield, X } from 'lucide-react' import { useEffect, useState } from 'react' import { toast } from 'sonner' @@ -81,6 +81,8 @@ export function EnvironmentConfigurationCard({ on_demand: environment.deployment_config?.onDemand ?? false, idle_timeout_seconds: environment.deployment_config?.idleTimeoutSeconds?.toString() ?? '300', wake_timeout_seconds: environment.deployment_config?.wakeTimeoutSeconds?.toString() ?? '30', + password_enabled: environment.deployment_config?.security?.passwordProtection?.enabled ?? false, + password: '', security: { enabled: environment.deployment_config?.security?.enabled ?? false, headers: { @@ -132,6 +134,8 @@ export function EnvironmentConfigurationCard({ on_demand: environment.deployment_config?.onDemand ?? false, idle_timeout_seconds: environment.deployment_config?.idleTimeoutSeconds?.toString() ?? '300', wake_timeout_seconds: environment.deployment_config?.wakeTimeoutSeconds?.toString() ?? '30', + password_enabled: environment.deployment_config?.security?.passwordProtection?.enabled ?? false, + password: '', security: { enabled: environment.deployment_config?.security?.enabled ?? false, headers: { @@ -213,6 +217,11 @@ export function EnvironmentConfigurationCard({ ? parseInt(formData.wake_timeout_seconds) : null, security: formData.security, + password: formData.password_enabled + ? (formData.password || null) + : (formData.password_enabled === false && environment.deployment_config?.security?.passwordProtection?.enabled + ? '' + : null), }, }) } @@ -394,12 +403,12 @@ export function EnvironmentConfigurationCard({

On-Demand (Scale-to-Zero)

-
-
+
+

- Automatically stop containers after a period of inactivity - and start them when a new request arrives. + Automatically stop containers after idle timeout + and restart on the next request.
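The sleep and wake handlers documented in `sdk.gen.ts` above now stop or start containers (waking also waits for health checks), and both endpoints can return `429` when an environment changes state too often. A minimal sketch of driving them from the generated client, assuming an `environment_id` path parameter and the client's `{ data, error, response }` result shape:

```ts
// Sketch only: manually sleep or wake an on-demand environment, backing off on 429.
// The path parameter name below is an assumption, not taken from this diff.
import { sleepEnvironment, wakeEnvironment } from '@/api/client/sdk.gen'

export async function setEnvironmentSleeping(environmentId: number, sleeping: boolean) {
  const options = { path: { environment_id: environmentId } } // assumed parameter name
  const { error, response } = sleeping
    ? await sleepEnvironment(options)
    : await wakeEnvironment(options)

  if (response?.status === 429) {
    // Documented above: too many state transitions, retry after cooldown
    throw new Error('Environment is changing state too often; retry shortly')
  }
  if (error) throw error
}
```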

{/* Protected environment toggle */} -
-
+
+

- Git pushes will not auto-deploy to this environment. - Deployments must be promoted from another environment. + Git pushes will not auto-deploy. Deployments must be promoted from another environment.
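Because a protected environment only receives promoted deployments, the promotion endpoint (typed above as `PromoteDeploymentData`/`PromoteDeploymentResponses`) is what a script would call instead of relying on a Git push. A rough sketch, assuming a matching `promoteDeployment` helper in `sdk.gen.ts` and its path parameter name:

```ts
// Sketch: promote an existing deployment into a protected environment.
// The helper name and path parameter are assumptions based on the generated types.
import { promoteDeployment } from '@/api/client/sdk.gen'

export async function promoteToProtected(deploymentId: number) {
  const { data, error } = await promoteDeployment({
    path: { deployment_id: deploymentId }, // assumed parameter name
  })
  if (error) throw error
  return data
}
```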

{/* Anti-affinity toggle */} -
-
+
+

- Spread replicas across different nodes. When enabled, - no two replicas of this environment land on the same - node (best-effort if more replicas than nodes). + Spread replicas across different nodes (best-effort).

+
{ if (newLabelKey.trim() && newLabelValue.trim()) { @@ -673,8 +680,8 @@ export function EnvironmentConfigurationCard({
-
-
+
+

Enable attack mode for development/testing @@ -691,8 +698,8 @@ export function EnvironmentConfigurationCard({ />

-
-
+
+
@@ -749,6 +756,57 @@ export function EnvironmentConfigurationCard({
)} + {/* Password Protection */} +
+
+ +

+ Require a password to access this environment. +
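The form above maps onto `UpdateEnvironmentSettingsRequest.password` from `types.gen.ts`: a non-empty string sets or rotates the password (hashed server-side), an empty string removes protection, and omitting or nulling the field keeps the current password. A hedged sketch of the equivalent API call; the path parameter name and the rest of the settings body are assumptions:

```ts
// Sketch: enable, rotate, or clear password protection for an environment.
// Only the password field is shown; other settings fields are omitted for brevity.
import { updateEnvironmentSettings } from '@/api/client/sdk.gen'

export async function setEnvironmentPassword(
  environmentId: number,
  password: string | null // non-empty = set/rotate, '' = remove, null = keep current
) {
  const { error } = await updateEnvironmentSettings({
    path: { environment_id: environmentId }, // assumed parameter name
    body: { password },
  })
  if (error) throw error
}
```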

+
+ + setFormData((prev) => ({ + ...prev, + password_enabled: checked, + password: checked ? prev.password : '', + })) + } + /> +
+ + {formData.password_enabled && ( +
+
+ + + setFormData((prev) => ({ + ...prev, + password: e.target.value, + })) + } + placeholder={ + environment.deployment_config?.security?.passwordProtection?.enabled + ? 'Leave empty to keep current password' + : 'Enter a password' + } + /> +

+ {environment.deployment_config?.security?.passwordProtection?.enabled + ? 'A password is currently set. Enter a new one to change it, or leave empty to keep the current password.' + : 'Set a password that visitors must enter to access this environment. The password is securely hashed.'} +

+
+
+ )} +

Rate Limiting

@@ -809,6 +867,7 @@ export function EnvironmentConfigurationCard({ diff --git a/web/src/components/storage/CreateServiceButton.tsx b/web/src/components/storage/CreateServiceButton.tsx index f660c010..175b604a 100644 --- a/web/src/components/storage/CreateServiceButton.tsx +++ b/web/src/components/storage/CreateServiceButton.tsx @@ -12,7 +12,15 @@ import { getProvidersMetadataOptions } from '@/api/client/@tanstack/react-query. import { useQuery } from '@tanstack/react-query' import { useNavigate } from 'react-router-dom' -export function CreateServiceButton({ onSuccess }: { onSuccess?: () => void }) { +export function CreateServiceButton({ + onSuccess, + open, + onOpenChange, +}: { + onSuccess?: () => void + open?: boolean + onOpenChange?: (open: boolean) => void +}) { const navigate = useNavigate() const { data: providers, isLoading } = useQuery({ @@ -20,7 +28,7 @@ export function CreateServiceButton({ onSuccess }: { onSuccess?: () => void }) { }) return ( - +
{inlineTestResult && (
{inlineTestResult.success ? ( - + ) : ( - + )} - + {inlineTestResult.success ? 'Key is valid' : inlineTestResult.error || 'Key test failed'} diff --git a/web/src/pages/CreateServiceNew.tsx b/web/src/pages/CreateServiceNew.tsx index e95aa92c..fc91d6b8 100644 --- a/web/src/pages/CreateServiceNew.tsx +++ b/web/src/pages/CreateServiceNew.tsx @@ -1,17 +1,29 @@ import { + adminListNodesOptions, createServiceMutation, getProviderMetadataOptions, getServiceTypeParametersOptions, } from '@/api/client/@tanstack/react-query.gen' -import { ServiceTypeRoute } from '@/api/client/types.gen' +import { + ClusterMemberRequest, + NodeInfoResponse, + ServiceTypeRoute, +} from '@/api/client/types.gen' import { Button } from '@/components/ui/button' import { JsonSchemaForm } from '@/components/forms/JsonSchemaForm' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' import { useBreadcrumbs } from '@/contexts/BreadcrumbContext' import { useMutation, useQuery } from '@tanstack/react-query' import { customAlphabet } from 'nanoid' -import { ArrowLeft } from 'lucide-react' +import { ArrowLeft, Plus, Server, Trash2 } from 'lucide-react' import { useEffect, useMemo, useState } from 'react' import { Link, useNavigate, useSearchParams } from 'react-router-dom' import { toast } from 'sonner' @@ -19,6 +31,221 @@ import { toast } from 'sonner' // Create a custom nanoid with lowercase alphanumeric characters const generateId = customAlphabet('0123456789abcdefghijklmnopqrstuvwxyz', 4) +/** Service types that support HA cluster topology */ +const CLUSTER_SERVICE_TYPES: ServiceTypeRoute[] = ['postgres'] + +/** Default cluster roles for each service type */ +const DEFAULT_CLUSTER_ROLES: Record = { + postgres: ['monitor', 'primary', 'replica'], +} + +const ROLE_DESCRIPTIONS: Record = { + monitor: 'pg_auto_failover monitor — coordinates failover', + primary: 'Read-write primary node', + replica: 'Read-only hot standby', +} + +function ClusterMemberConfig({ + members, + onMembersChange, + nodes, + serviceType, +}: { + members: ClusterMemberRequest[] + onMembersChange: (members: ClusterMemberRequest[]) => void + nodes: NodeInfoResponse[] + serviceType: string +}) { + const roles = DEFAULT_CLUSTER_ROLES[serviceType] || [] + + const addMember = () => { + // Default to replica if we already have all required roles + const hasMonitor = members.some((m) => m.role === 'monitor') + const hasPrimary = members.some((m) => m.role === 'primary') + const defaultRole = !hasMonitor + ? 'monitor' + : !hasPrimary + ? 'primary' + : 'replica' + onMembersChange([...members, { role: defaultRole, node_id: null }]) + } + + const removeMember = (index: number) => { + onMembersChange(members.filter((_, i) => i !== index)) + } + + const updateMember = ( + index: number, + field: keyof ClusterMemberRequest, + value: string | number | null + ) => { + const updated = [...members] + if (field === 'node_id') { + updated[index] = { + ...updated[index], + node_id: value === null ? 
null : Number(value), + } + } else { + updated[index] = { ...updated[index], [field]: value as string } + } + onMembersChange(updated) + } + + // Validation: warn about missing required roles + const hasMonitor = members.some((m) => m.role === 'monitor') + const hasPrimary = members.some((m) => m.role === 'primary') + const hasReplica = members.some((m) => m.role === 'replica') + const allHaveNodes = members.every((m) => m.node_id !== null) + + return ( +
+
+
+ +

+ Assign each member to a different node for true HA +

+
+ +
+ + {members.length === 0 && ( +
+ No members configured. Add at least a monitor, primary, and replica. +
+ )} + +
+ {members.map((member, index) => ( +
+
+ {index + 1} +
+ +
+
+ + + {ROLE_DESCRIPTIONS[member.role] && ( +

+ {ROLE_DESCRIPTIONS[member.role]} +

+ )} +
+ +
+ + +
+
+ + +
+ ))} +
+ + {members.length > 0 && (!hasMonitor || !hasPrimary || !hasReplica) && ( +
+ A PostgreSQL cluster requires at least:{' '} + + 1 monitor + + ,{' '} + + 1 primary + + ,{' '} + + 1 replica + +
+ )} + + {members.length >= 3 && + hasMonitor && + hasPrimary && + hasReplica && + !allHaveNodes && ( +
+ For true high availability, assign each member to a different node. + Members on the control plane share the same machine. +
+ )} + + {members.length >= 3 && hasMonitor && hasPrimary && hasReplica && allHaveNodes && ( +
+ Cluster configuration looks good. Members will communicate via their + private addresses. +
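Concretely, the create request this form builds pairs `topology: 'cluster'` with a `members` array covering the monitor, primary, and replica roles, and `node_id: null` keeps a member on the control plane. A sketch of the payload, assuming an underlying `createService` helper in `sdk.gen.ts` (the UI goes through `createServiceMutation`) and placeholder node IDs:

```ts
// Sketch: create an HA Postgres cluster with one member per required role.
// Node IDs are placeholders; real IDs would come from the nodes listing.
import { createService } from '@/api/client/sdk.gen'
import type { ClusterMemberRequest } from '@/api/client/types.gen'

export async function createPostgresCluster() {
  const members: ClusterMemberRequest[] = [
    { role: 'monitor', node_id: null }, // null = run on the control plane
    { role: 'primary', node_id: 2 },
    { role: 'replica', node_id: 3 },
  ]

  const { data, error } = await createService({
    body: {
      service_type: 'postgres',
      name: 'orders-db',
      topology: 'cluster',
      members,
      // standalone-only parameters (host, port, docker_image) are dropped for clusters
      parameters: {},
    },
  })
  if (error) throw error
  return data // status stays 'creating' until all members are provisioned
}
```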
+ )} +
+ ) +} + export function CreateService() { const navigate = useNavigate() const [searchParams] = useSearchParams() @@ -31,6 +258,30 @@ export function CreateService() { ) const [serviceName, setServiceName] = useState(defaultName) + const supportsCluster = + serviceType !== null && + CLUSTER_SERVICE_TYPES.includes(serviceType as ServiceTypeRoute) + const [topology, setTopology] = useState<'standalone' | 'cluster'>( + 'standalone' + ) + const [clusterMembers, setClusterMembers] = useState( + [] + ) + + // When switching to cluster topology, pre-populate default members + useEffect(() => { + if (topology === 'cluster' && clusterMembers.length === 0 && serviceType) { + const defaultRoles = DEFAULT_CLUSTER_ROLES[serviceType] + if (defaultRoles) { + setClusterMembers( + defaultRoles.map((role) => ({ role, node_id: null })) + ) + } + } + if (topology === 'standalone') { + setClusterMembers([]) + } + }, [topology, serviceType]) useEffect(() => { setBreadcrumbs([ @@ -39,6 +290,19 @@ export function CreateService() { ]) }, [setBreadcrumbs]) + // Fetch available nodes for cluster member assignment + const { data: nodesResponse } = useQuery({ + ...adminListNodesOptions(), + enabled: supportsCluster && topology === 'cluster', + }) + const nodes = useMemo( + () => + (nodesResponse?.nodes ?? []).filter( + (n: NodeInfoResponse) => n.status === 'active' + ), + [nodesResponse] + ) + // Fetch provider metadata for display const { data: providerMetadata } = useQuery({ ...getProviderMetadataOptions({ @@ -65,7 +329,11 @@ export function CreateService() { errorTitle: 'Failed to create service', }, onSuccess: (data) => { - toast.success('Service created successfully') + if (data.status === 'creating') { + toast.success('Cluster creation started — tracking progress...') + } else { + toast.success('Service created successfully') + } navigate(`/storage/${data.id}`) }, }) @@ -89,11 +357,22 @@ export function CreateService() { } }) + // For cluster topology, remove standalone-only params so the backend uses HA defaults + if (topology === 'cluster') { + delete cleanedParameters['docker_image'] + delete cleanedParameters['host'] + delete cleanedParameters['port'] + } + await createServiceMut.mutateAsync({ body: { service_type: serviceType as ServiceTypeRoute, name: serviceName, parameters: cleanedParameters, + ...(topology === 'cluster' && { + topology: 'cluster', + members: clusterMembers, + }), }, }) } @@ -198,6 +477,69 @@ export function CreateService() {

+ {/* Topology Selector (only for service types that support clustering) */} + {supportsCluster && ( +
+
+ +

+ Choose standalone for a single instance, or cluster for + high-availability with automatic failover +

+
+
+ + +
+ + {topology === 'cluster' && ( + <> +

+ Docker image will be set to{' '} + + gotempsh/postgres-ha:18-bookworm + {' '} + automatically (includes pg_auto_failover). +

+ + + )} +
+ )} + {/* JSON Schema Form for Parameters */} navigate('/storage')} submitText="Create Service" isSubmitting={createServiceMut.isPending} + hiddenFields={ + topology === 'cluster' + ? ['host', 'port', 'docker_image'] + : [] + } />
diff --git a/web/src/pages/EnvironmentDashboard.tsx b/web/src/pages/EnvironmentDashboard.tsx index c306d478..edfc1413 100644 --- a/web/src/pages/EnvironmentDashboard.tsx +++ b/web/src/pages/EnvironmentDashboard.tsx @@ -144,7 +144,7 @@ export function EnvironmentDashboard({ {/* Main Content */}
-
{renderContent()}
+
{renderContent()}
) diff --git a/web/src/pages/EnvironmentsTabsView.tsx b/web/src/pages/EnvironmentsTabsView.tsx index 032c9c08..75fa092b 100644 --- a/web/src/pages/EnvironmentsTabsView.tsx +++ b/web/src/pages/EnvironmentsTabsView.tsx @@ -74,9 +74,9 @@ export function EnvironmentsTabsView({ return (
-
-
-

Environments

+
+
+

Environments

Manage and monitor your environments

@@ -108,10 +108,10 @@ export function EnvironmentsTabsView({ onValueChange={(value) => setSelectedEnvId(parseInt(value))} className="flex flex-col h-full" > -
+
-
-

Environments

+
+

Environments

- +
+ + {environments.map((env) => ( + + {env.name} + + ))} + + { + await createEnv.mutateAsync({ + path: { project_id: project.id || 0 }, + body: values, + }) + }} + /> +
+ {environments.map((env) => ( { const [isLoading, setIsLoading] = useState(false) const [isDemoLoading, setIsDemoLoading] = useState(false) + const { data: publicSettings } = useQuery({ + queryKey: ['public-settings'], + queryFn: async () => { + const res = await fetch('/api/settings/public') + if (!res.ok) return { demo_enabled: false } + return res.json() as Promise<{ demo_enabled: boolean }> + }, + staleTime: 5 * 60 * 1000, + }) const navigate = useNavigate() const queryClient = useQueryClient() const { refetch } = useAuth() @@ -102,29 +111,33 @@ export const Login = () => { isLoading={isLoading || login.isPending} /> -
-
- -
-
- - Or continue with - -
-
+ {publicSettings?.demo_enabled && ( + <> +
+
+ +
+
+ + Or continue with + +
+
- -

- Explore analytics and monitoring with sample data -

+ +

+ Explore analytics and monitoring with sample data +

+ + )}
) diff --git a/web/src/pages/ServiceDetail.tsx b/web/src/pages/ServiceDetail.tsx index 40ca609d..93aeeb7d 100644 --- a/web/src/pages/ServiceDetail.tsx +++ b/web/src/pages/ServiceDetail.tsx @@ -53,6 +53,7 @@ import { MoreVertical, Pencil, RefreshCcw, + Server, Trash2, } from 'lucide-react' import { useEffect, useState } from 'react' @@ -70,6 +71,7 @@ export function ServiceDetail() { const [isBackupDialogOpen, setIsBackupDialogOpen] = useState(false) const [isStopDialogOpen, setIsStopDialogOpen] = useState(false) const [error, setError] = useState(null) + const [prevStatus, setPrevStatus] = useState(undefined) const [visibleParameters, setVisibleParameters] = useState>( new Set() ) @@ -84,6 +86,10 @@ export function ServiceDetail() { path: { id: parseInt(id!) }, }), enabled: !!id, + refetchInterval: (query) => { + const status = query.state.data?.service?.status + return status === 'creating' ? 2000 : false + }, }) // Query for environment variables @@ -127,6 +133,19 @@ export function ServiceDetail() { usePageTitle(service?.service?.name || 'Service Details') + // Notify when cluster creation completes or fails + useEffect(() => { + const currentStatus = service?.service?.status + if (prevStatus === 'creating' && currentStatus === 'running') { + toast.success('Cluster created successfully') + } else if (prevStatus === 'creating' && currentStatus === 'failed') { + toast.error('Cluster creation failed') + } + if (currentStatus) { + setPrevStatus(currentStatus) + } + }, [service?.service?.status, prevStatus]) + const startService = useMutation({ ...startServiceMutation(), meta: { @@ -149,6 +168,37 @@ export function ServiceDetail() { }, }) + const retryCluster = useMutation({ + mutationFn: async (options: { + path: { id: number } + body: { members: { role: string; node_id?: number }[] } + }) => { + const response = await fetch( + `/api/external-services/${options.path.id}/retry`, + { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + credentials: 'include', + body: JSON.stringify(options.body), + } + ) + if (!response.ok) { + const error = await response.json().catch(() => ({})) + throw new Error(error.detail || 'Retry failed') + } + return response.json() + }, + onSuccess: () => { + toast.success('Cluster retry initiated') + refetch() + }, + onError: (error: Error) => { + toast.error('Failed to retry cluster', { + description: error.message, + }) + }, + }) + const deleteService = useMutation({ ...deleteServiceMutation(), meta: { @@ -272,6 +322,12 @@ export function ServiceDetail() { /> {service.service.service_type} + {service.service.topology === 'cluster' && ( + + + Cluster + + )}

Created @@ -411,6 +467,154 @@ export function ServiceDetail() { + {/* Cluster Creation Progress */} + {service.service.topology === 'cluster' && + service.service.status === 'creating' && ( + + + + + Creating cluster members... + {' '} + This may take a minute. Members will appear below as they are + provisioned. + + + )} + + {/* Cluster Creation Failed */} + {service.service.topology === 'cluster' && + service.service.status === 'failed' && ( + + + +

+ + Cluster creation failed. + {' '} + {(service.service as Record).error_message + ? String( + (service.service as Record) + .error_message + ) + : 'An unknown error occurred.'} +
+ + + + )} + + {/* Cluster Members Section */} + {service.service.topology === 'cluster' && + service.service.members && + service.service.members.length > 0 && ( + + + + Cluster Members + + {service.service.members.length} + + + + pg_auto_failover cluster nodes + + + +
+ {service.service.members.map((member) => ( +
+
+ {member.status === 'creating' ? ( + + ) : ( + + )} +
+
+ + {member.container_name} + + + {member.role} + +
+
+ {member.hostname && ( + {member.hostname} + )} + {member.port && :{member.port}} + {member.node_id && ( + + (node {member.node_id}) + + )} +
+
+
+ + {member.status === 'creating' && ( + + )} + {member.status} + +
+ ))} +
+
+
+ )} + {/* Service Configuration Section */} diff --git a/web/src/pages/Storage.tsx b/web/src/pages/Storage.tsx index d674891b..3ebf0f37 100644 --- a/web/src/pages/Storage.tsx +++ b/web/src/pages/Storage.tsx @@ -31,6 +31,7 @@ export function Storage() { const [searchParams, setSearchParams] = useSearchParams() const [isEditDialogOpen, setIsEditDialogOpen] = useState(false) const [selectedService, setSelectedService] = useState(null) + const [isCreateDropdownOpen, setIsCreateDropdownOpen] = useState(false) // Get active tab from URL or default to 'external' const activeTab = searchParams.get('tab') || 'external' @@ -52,8 +53,11 @@ export function Storage() { setBreadcrumbs([{ label: 'Storage', href: '/storage' }]) }, [setBreadcrumbs]) - // Keyboard shortcut: N to create new service (navigate to create page) - useKeyboardShortcut({ key: 'n', path: '/storage/create' }) + // Keyboard shortcut: N to open the create service dropdown + useKeyboardShortcut({ + key: 'n', + callback: () => setIsCreateDropdownOpen(true), + }) usePageTitle('Storage') @@ -114,7 +118,11 @@ export function Storage() {
refetch()} /> - refetch()} /> + refetch()} + open={isCreateDropdownOpen} + onOpenChange={setIsCreateDropdownOpen} + />