From 9b1977993ca056ca628f480ed5a5e1f15a2851b5 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 11:49:06 -0700 Subject: [PATCH 01/64] feat: Migrate database from PostgreSQL to SQLite. --- Cargo.lock | 484 ++---------------- Cargo.toml | 2 +- backend/Cargo.toml | 2 +- backend/src/api/admin.rs | 12 +- backend/src/api/public.rs | 28 +- backend/src/main.rs | 12 +- backend/src/state.rs | 6 +- docker-compose.prod.yaml | 32 +- docker-compose.yaml | 15 +- migrations/20260110000000_initial_schema.sql | 59 +-- .../20260210120000_seed_admin_password.sql | 2 +- scripts/setup-dev.sh | 22 +- 12 files changed, 114 insertions(+), 562 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 062bdde9..013f586d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -139,10 +139,10 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.8.1", + "hyper", "hyper-util", "itoa", "matchit", @@ -156,7 +156,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tower", "tower-layer", @@ -173,13 +173,13 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -216,12 +216,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.1" @@ -234,12 +228,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d809780667f4410e7c41b07f52439b94d2bdf8528eeedc287fa38d3b7f95d82" -[[package]] -name = "bitflags" -version = "1.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.10.0" @@ -420,16 +408,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -530,12 +508,6 @@ dependencies = [ "syn", ] -[[package]] -name = "deunicode" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd57806937c9cc163efc8ea3910e00a62e2aeb0b8119f1793a978088f8f6b04" - [[package]] name = "digest" version = "0.10.7" @@ -653,12 +625,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - [[package]] name = "find-msvc-tools" version = "0.1.7" @@ -676,33 +642,12 @@ dependencies = [ "spin", ] -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - [[package]] name = "foldhash" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.2" @@ -904,7 +849,7 @@ dependencies = [ "futures-core", "futures-sink", "gloo-utils", - "http 1.4.0", + "http", "js-sys", "pin-project", "serde", @@ -934,25 +879,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17e2ac29387b1aa07a1e448f7bb4f35b500787971e965b02842b900afa5c8f6f" -[[package]] -name = "h2" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "hashbrown" version = "0.14.5" @@ -1039,17 +965,6 @@ dependencies = [ "utf8-width", ] -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - [[package]] name = "http" version = "1.4.0" @@ -1060,17 +975,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1078,7 +982,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.4.0", + "http", ] [[package]] @@ -1089,8 +993,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.4.0", - "http-body 1.0.1", + "http", 
+ "http-body", "pin-project-lite", ] @@ -1128,30 +1032,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "hyper" -version = "0.14.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.5.10", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.8.1" @@ -1162,8 +1042,8 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "httparse", "httpdate", "itoa", @@ -1173,19 +1053,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.32", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-util" version = "0.1.19" @@ -1194,9 +1061,9 @@ checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "bytes", "futures-core", - "http 1.4.0", - "http-body 1.0.1", - "hyper 1.8.1", + "http", + "http-body", + "hyper", "pin-project-lite", "tokio", "tower-service", @@ -1353,12 +1220,6 @@ dependencies = [ "rustversion", ] -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - [[package]] name = "itertools" version = "0.14.0" @@ -1390,7 +1251,7 @@ version = "9.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" dependencies = [ - "base64 0.22.1", + "base64", 
"js-sys", "pem", "ring", @@ -1415,7 +1276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b8731cb00f3f0894058155410b95c8955b17273181d2bc72600ab84edd24f1" dependencies = [ "any_spawner", - "base64 0.22.1", + "base64", "cfg-if", "either_of", "futures", @@ -1612,7 +1473,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66985242812ec95e224fb48effe651ba02728beca92c461a9464c811a71aab11" dependencies = [ "any_spawner", - "base64 0.22.1", + "base64", "codee", "futures", "hydration_context", @@ -1643,7 +1504,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ - "bitflags 2.10.0", + "bitflags", "libc", "redox_syscall 0.7.0", ] @@ -1654,6 +1515,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ + "cc", "pkg-config", "vcpkg", ] @@ -1664,12 +1526,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfae20f6b19ad527b550c223fddc3077a547fc70cda94b9b566575423fd303ee" -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - [[package]] name = "litemap" version = "0.8.1" @@ -1745,22 +1601,6 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -[[package]] -name = "migration_tool" -version = "0.1.0" -dependencies = [ - "chrono", - "dotenvy", - "reqwest", - "serde", - "serde_json", - "shared", - "slug", - "sqlx", - "tokio", - "uuid", -] - [[package]] name = "mime" version = "0.3.17" @@ -1797,7 +1637,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", 
- "http 1.4.0", + "http", "httparse", "memchr", "mime", @@ -1805,23 +1645,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "next_tuple" version = "0.1.0" @@ -1925,50 +1748,6 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" -[[package]] -name = "openssl" -version = "0.10.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags 2.10.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - -[[package]] -name = "openssl-sys" -version = "0.9.111" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "or_poisoned" version = "0.1.0" @@ -2033,7 +1812,7 @@ version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ - "base64 0.22.1", + "base64", "serde_core", ] @@ -2322,7 +2101,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags", ] [[package]] @@ -2331,7 +2110,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" dependencies = [ - "bitflags 2.10.0", + "bitflags", ] [[package]] @@ -2363,46 +2142,6 @@ version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "ring" version = "0.17.14" @@ -2458,19 +2197,6 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" -[[package]] -name = "rustix" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" 
-dependencies = [ - "bitflags 2.10.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.61.2", -] - [[package]] name = "rustls" version = "0.23.36" @@ -2485,15 +2211,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pki-types" version = "1.13.2" @@ -2535,44 +2252,12 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" -dependencies = [ - "windows-sys 0.61.2", -] - [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.10.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "send_wrapper" version = "0.6.0" @@ -2680,9 +2365,9 @@ dependencies = [ "dashmap", "futures", "gloo-net", - "http 1.4.0", + "http", "http-body-util", - "hyper 1.8.1", + "hyper", "inventory", "js-sys", "once_cell", @@ -2821,16 +2506,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "slug" -version = "0.1.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "882a80f72ee45de3cc9a5afeb2da0331d58df69e4e7d8eeb5d3c7784ae67e724" -dependencies = [ - "deunicode", - "wasm-bindgen", -] - [[package]] name = "smallvec" version = "1.15.1" @@ -2840,16 +2515,6 @@ dependencies = [ "serde", ] -[[package]] -name = "socket2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "socket2" version = "0.6.1" @@ -2898,7 +2563,7 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "chrono", "crc", @@ -2975,8 +2640,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" dependencies = [ "atoi", - "base64 0.22.1", - "bitflags 2.10.0", + "base64", + "bitflags", "byteorder", "bytes", "chrono", @@ -3019,8 +2684,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" dependencies = [ "atoi", - "base64 0.22.1", - "bitflags 2.10.0", + "base64", + "bitflags", "byteorder", "chrono", "crc", @@ -3123,12 +2788,6 @@ dependencies = [ "syn", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -3146,27 +2805,6 @@ dependencies = [ "syn", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ 
- "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "tachys" version = "0.1.9" @@ -3201,19 +2839,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "tempfile" -version = "3.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" -dependencies = [ - "fastrand", - "getrandom 0.3.4", - "once_cell", - "rustix", - "windows-sys 0.61.2", -] - [[package]] name = "thiserror" version = "1.0.69" @@ -3340,7 +2965,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2", "tokio-macros", "windows-sys 0.61.2", ] @@ -3356,16 +2981,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-stream" version = "0.1.18" @@ -3430,7 +3045,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tower-layer", "tower-service", @@ -3443,12 +3058,12 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.10.0", + "bitflags", "bytes", "futures-core", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "http-range-header", "httpdate", @@ -3537,12 +3152,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "try-lock" -version = "0.2.5" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - [[package]] name = "typed-builder" version = "0.20.1" @@ -3684,15 +3293,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" @@ -4122,16 +3722,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wit-bindgen" version = "0.46.0" diff --git a/Cargo.toml b/Cargo.toml index 09bc9428..152324d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [workspace] resolver = "2" -members = ["frontend", "backend", "shared", "migration"] +members = ["frontend", "backend", "shared"] [workspace.dependencies] leptos = { version = "0.7" } diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 1c01d0bd..83fb500d 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" axum = { workspace = true } tokio = { workspace = true } sqlx = { version = "0.8", features = [ - "postgres", + "sqlite", "runtime-tokio-rustls", "uuid", "chrono", diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index e8fef503..ff170cb7 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -12,7 +12,7 @@ use axum::response::Html; use axum::response::IntoResponse; use axum::response::Redirect; use serde::{Deserialize, Serialize}; -use sqlx::PgPool; +use sqlx::SqlitePool; use jsonwebtoken::{encode, Header, EncodingKey}; use chrono::{Utc, Duration}; use argon2::{ @@ -83,7 
+83,7 @@ pub fn router(state: crate::state::AppState) -> Router { } async fn login( - State(pool): State, + State(pool): State, req: Request, ) -> Result { let (parts, body) = req.into_parts(); @@ -113,7 +113,7 @@ async fn login( return Err((StatusCode::UNSUPPORTED_MEDIA_TYPE, "Unsupported content type".to_string())); }; - let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE username = $1") + let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE username = ?") .bind(&req.username) .fetch_optional(&pool) .await @@ -176,7 +176,7 @@ async fn me(headers: HeaderMap) -> Result<&'static str, StatusCode> { } async fn change_password( - State(pool): State, + State(pool): State, headers: HeaderMap, Json(req): Json, ) -> Result { @@ -198,7 +198,7 @@ async fn change_password( .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Invalid user ID in token".to_string()))?; // Verify current password - let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = $1") + let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") .bind(user_id) .fetch_optional(&pool) .await @@ -214,7 +214,7 @@ async fn change_password( let new_hash = hash_password(&req.new_password) .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Failed to hash password".to_string()))?; - sqlx::query("UPDATE users SET password_hash = $1 WHERE id = $2") + sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) .bind(user_id) .execute(&pool) diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 16ee39e6..51a85cda 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -1,6 +1,6 @@ use axum::{extract::State, routing::get, Json, Router}; use shared::{Article, BlogPost}; -use sqlx::PgPool; +use sqlx::SqlitePool; pub fn router(state: crate::state::AppState) -> Router { Router::new() @@ -15,9 +15,9 @@ async fn health_check() -> &'static str { use sqlx::Row; -async fn list_articles(State(pool): State) -> Json> { +async fn list_articles(State(pool): State) -> Json> { match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT 20") - .map(|row: sqlx::postgres::PgRow| { + .map(|row: sqlx::sqlite::SqliteRow| { let origin_str: String = row.get("origin"); let origin = match origin_str.as_str() { "imported" => shared::Origin::Imported, @@ -25,7 +25,7 @@ async fn list_articles(State(pool): State) -> Json> { _ => shared::Origin::Local, }; Article { - id: row.get("id"), + id: row.get::("id"), wp_id: row.get("wp_id"), slug: row.get("slug"), title: row.get("title"), @@ -49,15 +49,19 @@ async fn list_articles(State(pool): State) -> Json> { } } -async fn list_blog_posts(State(pool): State) -> Json> { +async fn list_blog_posts(State(pool): State) -> Json> { match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") - .map(|row: sqlx::postgres::PgRow| BlogPost { - id: row.get("id"), - slug: row.get("slug"), - title: row.get("title"), - content: row.get("content"), - published_at: row.get("published_at"), - tags: row.get("tags"), + .map(|row: sqlx::sqlite::SqliteRow| { + let tags_str: Option = row.get("tags"); + let tags = tags_str.and_then(|s| serde_json::from_str(&s).ok()); + BlogPost { + id: row.get::("id"), + slug: row.get("slug"), + title: 
row.get("title"), + content: row.get("content"), + published_at: row.try_get("published_at").unwrap_or_default(), + tags, + } }) .fetch_all(&pool) .await diff --git a/backend/src/main.rs b/backend/src/main.rs index 4454928c..5d917b45 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -11,7 +11,8 @@ use futures_util::StreamExt; use leptos::context::provide_context; use leptos::prelude::*; use leptos_axum::{generate_route_list, LeptosRoutes}; -use sqlx::postgres::PgPoolOptions; +use sqlx::sqlite::{SqlitePoolOptions, SqliteConnectOptions}; +use std::str::FromStr; use std::net::SocketAddr; use tower::ServiceBuilder; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; @@ -38,9 +39,14 @@ async fn main() -> Result<(), Box> { let database_url = std::env::var("DATABASE_URL") .map_err(|_| "DATABASE_URL environment variable must be set")?; - let pool = PgPoolOptions::new() + // Parse options and ensure database is created if it doesn't exist + let connect_options = SqliteConnectOptions::from_str(&database_url) + .map_err(|e| format!("Invalid DATABASE_URL: {}", e))? 
+ .create_if_missing(true); + + let pool = SqlitePoolOptions::new() .max_connections(5) - .connect(&database_url) + .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/backend/src/state.rs b/backend/src/state.rs index cbea6ecc..e1868b96 100644 --- a/backend/src/state.rs +++ b/backend/src/state.rs @@ -1,11 +1,11 @@ use axum::extract::FromRef; use leptos::prelude::LeptosOptions; -use sqlx::PgPool; +use sqlx::SqlitePool; #[derive(Clone)] pub struct AppState { pub leptos_options: LeptosOptions, - pub pool: PgPool, + pub pool: SqlitePool, } impl FromRef for LeptosOptions { @@ -14,7 +14,7 @@ impl FromRef for LeptosOptions { } } -impl FromRef for PgPool { +impl FromRef for SqlitePool { fn from_ref(state: &AppState) -> Self { state.pool.clone() } diff --git a/docker-compose.prod.yaml b/docker-compose.prod.yaml index 0cf9e0cc..35d44e0f 100644 --- a/docker-compose.prod.yaml +++ b/docker-compose.prod.yaml @@ -3,39 +3,16 @@ services: build: . restart: always environment: - - DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + - DATABASE_URL=sqlite://data/sqlite.db - LEPTOS_SITE_ADDR=0.0.0.0:3000 - RUST_LOG=info - depends_on: - - db networks: - jake_net volumes: - ./media_mount:/app/media + - ./data:/app/data - db: - image: postgres:15-alpine - restart: always - environment: - POSTGRES_USER: ${POSTGRES_USER} - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - POSTGRES_DB: ${POSTGRES_DB} - volumes: - - db_data:/var/lib/postgresql/data - networks: - - jake_net - - migration: - build: . 
- depends_on: - - db - environment: - DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} - volumes: - - ./migrations:/migrations - command: sqlx migrate run --source /migrations - networks: - - jake_net + # Embedded SQLite replaces separate db and migration services nginx: image: nginx:stable-alpine @@ -61,8 +38,5 @@ services: - ./certbot/www:/var/www/certbot entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'" -volumes: - db_data: - networks: jake_net: diff --git a/docker-compose.yaml b/docker-compose.yaml index 7f75172a..9b1c0392 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,18 +1,7 @@ version: "3.8" services: - db: - image: docker.io/bitnami/postgresql:15 - restart: always - userns_mode: keep-id - environment: - POSTGRESQL_USERNAME: admin - POSTGRESQL_PASSWORD: password - POSTGRESQL_DATABASE: portfolio - ports: - - "5432:5432" - volumes: - - db_data_v2:/bitnami/postgresql + # The web and backend services will now use a local SQLite DB file. 
volumes: - db_data_v2: + diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index f072fdc0..05d072ad 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,17 +1,14 @@ --- Enable UUID extension -CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; - -- Users (Admin) CREATE TABLE users ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -20,74 +17,72 @@ CREATE TABLE articles ( content TEXT NOT NULL, -- HTML content cover_image_url TEXT, author TEXT NOT NULL, - published_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + published_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, origin TEXT NOT NULL DEFAULT 'local', -- 'imported', 'synced', 'local' - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Personal Blog Posts CREATE TABLE blog_posts ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text - published_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - tags TEXT[], - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + published_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + tags TEXT, -- JSON Array + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Creative Writing 
(Stories, Novels, Poetry) -CREATE TYPE creative_type AS ENUM ('story', 'novel', 'poetry'); CREATE TABLE creative_works ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, - work_type creative_type NOT NULL, + work_type TEXT NOT NULL, -- 'story', 'novel', 'poetry' synopsis TEXT, content TEXT, -- Full text or chapters (can be JSON if complex) status TEXT NOT NULL DEFAULT 'published', -- 'draft', 'published' - published_at TIMESTAMPTZ DEFAULT NOW(), - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + published_at DATETIME DEFAULT CURRENT_TIMESTAMP, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Media Items (Photography, Visual Art, J-School Video, Videography) -CREATE TYPE media_category AS ENUM ('photography', 'visual_art', 'video', 'j_school'); -CREATE TYPE media_context AS ENUM ('personal', 'professional'); + CREATE TABLE media_items ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, title TEXT, description TEXT, url TEXT NOT NULL, -- S3 URL or local path thumbnail_url TEXT, - category media_category NOT NULL, - context media_context NOT NULL DEFAULT 'personal', -- To distinguish Photojournalism (prof) vs Personal - taken_at TIMESTAMPTZ, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + category TEXT NOT NULL, -- 'photography', 'visual_art', 'video', 'j_school' + context TEXT NOT NULL DEFAULT 'personal', -- To distinguish Photojournalism (prof) vs Personal + taken_at DATETIME, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Music CREATE TABLE music_tracks ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, title TEXT NOT NULL, description TEXT, audio_url TEXT, embed_code TEXT, -- For Soundcloud/Spotify iframe - published_at TIMESTAMPTZ DEFAULT NOW(), - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + published_at DATETIME DEFAULT CURRENT_TIMESTAMP, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); -- Programming 
Projects CREATE TABLE projects ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + id UUID PRIMARY KEY, name TEXT NOT NULL, description TEXT, github_url TEXT, demo_url TEXT, - technologies TEXT[], + technologies TEXT, -- JSON Array stars INT DEFAULT 0, is_featured BOOLEAN DEFAULT FALSE, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index 5468b3f6..24153e78 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -1,6 +1,6 @@ -- Update or insert admin user with secure default password -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) -VALUES (gen_random_uuid(), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') +VALUES ('00000000-0000-0000-0000-000000000000', 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO UPDATE SET password_hash = EXCLUDED.password_hash; diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index ea4af5b8..be0ee60a 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -49,24 +49,19 @@ if [ "$CONTAINER_CMD" = "docker" ] && ! docker ps &> /dev/null; then fi fi -# Start database -echo "📦 Starting PostgreSQL database..." -COMPOSE_CMD="docker-compose" -if [ "$CONTAINER_CMD" = "podman" ]; then - COMPOSE_CMD="podman-compose" -fi - -$COMPOSE_CMD up -d db -sleep 3 - +# Try to use existing tools if possible, but no background service is needed for sqlite. echo "" echo "⏳ Running database migrations..." 
+# create an empty sqlite database file if it doesn't exist +touch sqlite.db +export DATABASE_URL="sqlite://sqlite.db" + cargo sqlx database create || true -cargo sqlx migrate run -D "postgres://admin:password@127.0.0.1:5432/portfolio" || true +cargo sqlx migrate run -D $DATABASE_URL || true echo "" echo "👤 Creating default admin user..." -PGPASSWORD=password psql -U admin -h 127.0.0.1 -d portfolio -c "INSERT INTO users (username, password_hash) VALUES ('admin', 'demo-admin-2026!') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('00000000-0000-0000-0000-000000000000', 'admin', '\$argon2id\$v=19\$m=19456,t=2,p=1\$Ewiz6jCZu9NGQaAJtWRLqg\$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" @@ -83,5 +78,4 @@ echo "🔐 Default credentials:" echo " Username: admin" echo " Password: demo-admin-2026!" echo "" -echo "🛑 To stop the database:" -echo " $COMPOSE_CMD down" +echo "🛑 Note: the seeded admin password is ZO6gOCn0icxcvrke62F96A== (see migrations/20260210120000_seed_admin_password.sql), not the value shown above." From 694168ac073c90494cd38c234845b126c3cf9b50 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 11:57:45 -0700 Subject: [PATCH 02/64] refactor: use `gen_random_uuid()` for admin user ID in seed migration.
--- migrations/20260210120000_seed_admin_password.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index 24153e78..5468b3f6 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -1,6 +1,6 @@ -- Update or insert admin user with secure default password -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) -VALUES ('00000000-0000-0000-0000-000000000000', 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') +VALUES (gen_random_uuid(), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO UPDATE SET password_hash = EXCLUDED.password_hash; From fc6c4eaf1b16e0fc4c60893cc6364bfb7c48a039 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:01:20 -0700 Subject: [PATCH 03/64] feat: dynamically generate UUID for the default admin user. --- scripts/setup-dev.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index be0ee60a..7af8c6cf 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -61,7 +61,8 @@ cargo sqlx migrate run -D $DATABASE_URL || true echo "" echo "👤 Creating default admin user..." 
-sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('00000000-0000-0000-0000-000000000000', 'admin', '\$argon2id\$v=19\$m=19456,t=2,p=1\$Ewiz6jCZu9NGQaAJtWRLqg\$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") +sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('$ADMIN_UUID', 'admin', '\$argon2id\$v=19\$m=19456,t=2,p=1\$Ewiz6jCZu9NGQaAJtWRLqg\$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" From 347a0aeb6071bbc43cea4fe35ba76f2aea1b692e Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:09:49 -0700 Subject: [PATCH 04/64] refactor: update admin user ID generation to use hex(randomblob) for UUID. --- migrations/20260210120000_seed_admin_password.sql | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index 5468b3f6..e1e67017 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -1,6 +1,9 @@ -- Update or insert admin user with secure default password -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) -VALUES (gen_random_uuid(), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') +VALUES ( + lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-a' || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6))), + 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI' +) ON CONFLICT (username) DO UPDATE SET password_hash = 
EXCLUDED.password_hash; From 6e34da8a7d9c623023c011b9cd0f16ee59443de4 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:28:31 -0700 Subject: [PATCH 05/64] feat: Add `Display`, `FromStr`, and lowercase serde support to `CreativeType` enum. --- shared/src/lib.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/shared/src/lib.rs b/shared/src/lib.rs index dd40b064..83de1930 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -61,12 +61,36 @@ pub struct MediaItem { } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] pub enum CreativeType { Story, Novel, Poetry, } +impl std::fmt::Display for CreativeType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreativeType::Story => write!(f, "story"), + CreativeType::Novel => write!(f, "novel"), + CreativeType::Poetry => write!(f, "poetry"), + } + } +} + +impl std::str::FromStr for CreativeType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "story" => Ok(CreativeType::Story), + "novel" => Ok(CreativeType::Novel), + "poetry" => Ok(CreativeType::Poetry), + _ => Err(format!("Invalid creative type: {}", s)), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CreativeWork { pub id: Uuid, From 1b4b3ab3b740d93f39b2d0e12d39f7becc659282 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:31:33 -0700 Subject: [PATCH 06/64] feat: Add `Display` and `FromStr` implementations for `MediaCategory` and `MediaContext` enums and configure their `serde` renaming. 
--- shared/src/lib.rs | 48 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 83de1930..71912e91 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -35,6 +35,7 @@ pub struct BlogPost { } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] pub enum MediaCategory { Photography, VisualArt, @@ -42,12 +43,59 @@ pub enum MediaCategory { JSchool, } +impl std::fmt::Display for MediaCategory { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MediaCategory::Photography => write!(f, "photography"), + MediaCategory::VisualArt => write!(f, "visual_art"), + MediaCategory::Video => write!(f, "video"), + MediaCategory::JSchool => write!(f, "j_school"), + } + } +} + +impl std::str::FromStr for MediaCategory { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "photography" => Ok(MediaCategory::Photography), + "visual_art" => Ok(MediaCategory::VisualArt), + "video" => Ok(MediaCategory::Video), + "j_school" => Ok(MediaCategory::JSchool), + _ => Err(format!("Invalid media category: {}", s)), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] pub enum MediaContext { Personal, Professional, } +impl std::fmt::Display for MediaContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MediaContext::Personal => write!(f, "personal"), + MediaContext::Professional => write!(f, "professional"), + } + } +} + +impl std::str::FromStr for MediaContext { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "personal" => Ok(MediaContext::Personal), + "professional" => Ok(MediaContext::Professional), + _ => Err(format!("Invalid media context: {}", s)), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MediaItem { pub id: Uuid, From 
3bb14ffb6f00404558429cd5c360d9bf847a20b4 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:36:55 -0700 Subject: [PATCH 07/64] refactor: Directly retrieve `published_at` from database row instead of using `try_get` with a default. --- backend/src/api/public.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 51a85cda..0b01882f 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -59,7 +59,7 @@ async fn list_blog_posts(State(pool): State) -> Json> slug: row.get("slug"), title: row.get("title"), content: row.get("content"), - published_at: row.try_get("published_at").unwrap_or_default(), + published_at: row.get("published_at"), tags, } }) From 1f974abcad04b9493b07b4d6b232adae3df2bf8c Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 12:41:46 -0700 Subject: [PATCH 08/64] feat: Configure SQLite with WAL journal mode, busy timeout, and ISO 8601 timestamp defaults, and improve dev setup script. --- backend/src/main.rs | 4 ++- docker-compose.yaml | 4 +-- migrations/20260110000000_initial_schema.sql | 26 ++++++++++---------- scripts/setup-dev.sh | 6 +++-- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/backend/src/main.rs b/backend/src/main.rs index 5d917b45..a7e82259 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -42,7 +42,9 @@ async fn main() -> Result<(), Box> { // Parse options and ensure database is created if it doesn't exist let connect_options = SqliteConnectOptions::from_str(&database_url) .map_err(|e| format!("Invalid DATABASE_URL: {}", e))? 
- .create_if_missing(true); + .create_if_missing(true) + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() .max_connections(5) diff --git a/docker-compose.yaml b/docker-compose.yaml index 9b1c0392..cecb119c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,7 +1,5 @@ version: "3.8" -services: +# services: # The web and backend services will now use a local SQLite DB file. -volumes: - diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 05d072ad..c73e1242 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -3,7 +3,7 @@ CREATE TABLE users ( id UUID PRIMARY KEY, username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Articles (Journalism - Imported/Synced) @@ -17,10 +17,10 @@ CREATE TABLE articles ( content TEXT NOT NULL, -- HTML content cover_image_url TEXT, author TEXT NOT NULL, - published_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + published_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), origin TEXT NOT NULL DEFAULT 'local', -- 'imported', 'synced', 'local' - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), + updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Personal Blog Posts @@ -29,10 +29,10 @@ CREATE TABLE blog_posts ( slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text - published_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + published_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), tags TEXT, -- JSON Array - created_at DATETIME 
NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), + updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Creative Writing (Stories, Novels, Poetry) @@ -44,8 +44,8 @@ CREATE TABLE creative_works ( synopsis TEXT, content TEXT, -- Full text or chapters (can be JSON if complex) status TEXT NOT NULL DEFAULT 'published', -- 'draft', 'published' - published_at DATETIME DEFAULT CURRENT_TIMESTAMP, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + published_at DATETIME DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Media Items (Photography, Visual Art, J-School Video, Videography) @@ -60,7 +60,7 @@ CREATE TABLE media_items ( category TEXT NOT NULL, -- 'photography', 'visual_art', 'video', 'j_school' context TEXT NOT NULL DEFAULT 'personal', -- To distinguish Photojournalism (prof) vs Personal taken_at DATETIME, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Music @@ -70,8 +70,8 @@ CREATE TABLE music_tracks ( description TEXT, audio_url TEXT, embed_code TEXT, -- For Soundcloud/Spotify iframe - published_at DATETIME DEFAULT CURRENT_TIMESTAMP, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + published_at DATETIME DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Programming Projects @@ -84,5 +84,5 @@ CREATE TABLE projects ( technologies TEXT, -- JSON Array stars INT DEFAULT 0, is_featured BOOLEAN DEFAULT FALSE, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 7af8c6cf..dd9d9431 100755 --- 
a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -57,12 +57,14 @@ touch sqlite.db export DATABASE_URL="sqlite://sqlite.db" cargo sqlx database create || true -cargo sqlx migrate run -D $DATABASE_URL || true +cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" echo "👤 Creating default admin user..." ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") -sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('$ADMIN_UUID', 'admin', '\$argon2id\$v=19\$m=19456,t=2,p=1\$Ewiz6jCZu9NGQaAJtWRLqg\$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db < Date: Thu, 19 Mar 2026 12:44:23 -0700 Subject: [PATCH 09/64] feat: Improve blog post tag deserialization error handling and document `uuidgen` fallback behavior in the dev setup script. --- backend/src/api/public.rs | 8 +++++++- scripts/setup-dev.sh | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 0b01882f..18834fe4 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -53,7 +53,13 @@ async fn list_blog_posts(State(pool): State) -> Json> match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") .map(|row: sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); - let tags = tags_str.and_then(|s| serde_json::from_str(&s).ok()); + let tags = tags_str.and_then(|s| match serde_json::from_str(&s) { + Ok(t) => Some(t), + Err(e) => { + tracing::warn!("Failed to deserialize tags: {}", e); + None + } + }); BlogPost { id: row.get::("id"), slug: row.get("slug"), diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index dd9d9431..835dd9a0 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -61,6 +61,8 @@ cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" echo "👤 Creating default admin 
user..." +# Note: If uuidgen is unavailable, a hardcoded fallback UUID is used. +# This results in shared admin IDs across affected dev installs, which is acceptable for dev environments. ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") sqlite3 sqlite.db < Date: Thu, 19 Mar 2026 12:54:32 -0700 Subject: [PATCH 10/64] feat: Enhance article content rendering with improved date formatting, image linking, paragraph wrapping, and admin article editing capabilities. --- .env.example | 5 +- .envrc | 3 +- backend/src/api/admin.rs | 103 ++++++++----- backend/src/main.rs | 6 +- frontend/src/api/articles.rs | 31 +++- frontend/src/components/media_picker.rs | 33 ++-- frontend/src/components/mod.rs | 2 +- frontend/src/data/journalism.rs | 5 +- frontend/src/lib.rs | 2 +- frontend/src/pages/about.rs | 2 +- frontend/src/pages/admin/login.rs | 33 ++-- frontend/src/pages/admin/password_change.rs | 19 ++- frontend/src/pages/sections.rs | 152 +++++++++++++------ migrations/20260110000000_initial_schema.sql | 29 ++++ 14 files changed, 288 insertions(+), 137 deletions(-) diff --git a/.env.example b/.env.example index 514787da..30192ad6 100644 --- a/.env.example +++ b/.env.example @@ -1,8 +1,5 @@ # Database configuration -DATABASE_URL=postgres://admin:password@127.0.0.1:5432/portfolio -POSTGRES_USER=admin -POSTGRES_PASSWORD=password -POSTGRES_DB=portfolio +DATABASE_URL=sqlite:sqlite.db # Server configuration LEPTOS_SITE_ADDR=0.0.0.0:3000 diff --git a/.envrc b/.envrc index 2a701615..c739b755 100644 --- a/.envrc +++ b/.envrc @@ -1,8 +1,9 @@ +#!/usr/bin/env bash use flake # Load environment variables export RUST_LOG=info -export DATABASE_URL="postgres://admin:password@127.0.0.1:5432/portfolio" +export DATABASE_URL="sqlite:sqlite.db" export RUST_BACKTRACE=1 # Optional: Print helpful info when entering directory diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index ff170cb7..c0e74738 100644 --- a/backend/src/api/admin.rs +++ 
b/backend/src/api/admin.rs @@ -1,27 +1,24 @@ -use axum::{ - extract::State, - http::{HeaderMap, StatusCode}, - routing::{get, post}, - Router, +use argon2::{ + password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, + Argon2, }; use axum::body::to_bytes; -use axum::http::{header, Request}; use axum::body::Body; -use axum::response::Json; +use axum::http::{header, Request}; use axum::response::Html; use axum::response::IntoResponse; +use axum::response::Json; use axum::response::Redirect; +use axum::{ + extract::State, + http::{HeaderMap, StatusCode}, + routing::{get, post}, + Router, +}; +use chrono::{Duration, Utc}; +use jsonwebtoken::{encode, EncodingKey, Header}; use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; -use jsonwebtoken::{encode, Header, EncodingKey}; -use chrono::{Utc, Duration}; -use argon2::{ - password_hash::{ - rand_core::OsRng, - PasswordHash, PasswordHasher, PasswordVerifier, SaltString - }, - Argon2 -}; fn get_jwt_secret() -> &'static [u8] { // In production, use environment variable: std::env::var("JWT_SECRET").unwrap_or_default().as_bytes() @@ -61,7 +58,8 @@ struct UserRow { fn hash_password(password: &str) -> Result { let salt = SaltString::generate(&mut OsRng); let argon2 = Argon2::default(); - argon2.hash_password(password.as_bytes(), &salt) + argon2 + .hash_password(password.as_bytes(), &salt) .map_err(|e| e.to_string()) .map(|hash| hash.to_string()) } @@ -71,7 +69,9 @@ fn verify_password(password: &str, password_hash: &str) -> bool { Ok(h) => h, Err(_) => return false, }; - Argon2::default().verify_password(password.as_bytes(), &parsed_hash).is_ok() + Argon2::default() + .verify_password(password.as_bytes(), &parsed_hash) + .is_ok() } pub fn router(state: crate::state::AppState) -> Router { @@ -110,14 +110,23 @@ async fn login( serde_urlencoded::from_bytes(&bytes) .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid form data".to_string()))? 
} else { - return Err((StatusCode::UNSUPPORTED_MEDIA_TYPE, "Unsupported content type".to_string())); + return Err(( + StatusCode::UNSUPPORTED_MEDIA_TYPE, + "Unsupported content type".to_string(), + )); }; - let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE username = ?") - .bind(&req.username) - .fetch_optional(&pool) - .await - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Database error".to_string()))?; + let user: Option = + sqlx::query_as("SELECT id, password_hash FROM users WHERE username = ?") + .bind(&req.username) + .fetch_optional(&pool) + .await + .map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Database error".to_string(), + ) + })?; let is_invalid = match user { Some(ref u) => !verify_password(&req.password, &u.password_hash), @@ -148,7 +157,12 @@ async fn login( &claims, &EncodingKey::from_secret(get_jwt_secret()), ) - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Token generation failed".to_string()))?; + .map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Token generation failed".to_string(), + ) + })?; if content_type.contains("application/x-www-form-urlencoded") || content_type.contains("multipart/form-data") @@ -171,7 +185,6 @@ async fn me(headers: HeaderMap) -> Result<&'static str, StatusCode> { .and_then(|s| s.strip_prefix("Bearer ")) .ok_or(StatusCode::UNAUTHORIZED)?; - Ok("Authenticated") } @@ -192,34 +205,56 @@ async fn change_password( token, &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), &validation, - ).map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; + ) + .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - let user_id = token_data.claims.sub.parse::() - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Invalid user ID in token".to_string()))?; + let user_id = token_data.claims.sub.parse::().map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Invalid user ID in token".to_string(), + ) + })?; // Verify current password let 
user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") .bind(user_id) .fetch_optional(&pool) .await - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Database error".to_string()))?; + .map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Database error".to_string(), + ) + })?; let user = user.ok_or((StatusCode::NOT_FOUND, "User not found".to_string()))?; if !verify_password(&req.current_password, &user.password_hash) { - return Err((StatusCode::FORBIDDEN, "Invalid current password".to_string())); + return Err(( + StatusCode::FORBIDDEN, + "Invalid current password".to_string(), + )); } // Hash new password and update - let new_hash = hash_password(&req.new_password) - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Failed to hash password".to_string()))?; + let new_hash = hash_password(&req.new_password).map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to hash password".to_string(), + ) + })?; sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) .bind(user_id) .execute(&pool) .await - .map_err(|_| (StatusCode::INTERNAL_SERVER_ERROR, "Database update failed".to_string()))?; + .map_err(|_| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Database update failed".to_string(), + ) + })?; Ok(StatusCode::OK) } diff --git a/backend/src/main.rs b/backend/src/main.rs index a7e82259..603a9e83 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -1,8 +1,8 @@ #![recursion_limit = "256"] -use axum::{extract::State, Router}; use axum::body::Body; use axum::http::Request; use axum::middleware::{self, Next}; +use axum::{extract::State, Router}; use bytes::Bytes; use dotenvy::dotenv; use frontend::{App, Shell}; @@ -11,9 +11,9 @@ use futures_util::StreamExt; use leptos::context::provide_context; use leptos::prelude::*; use leptos_axum::{generate_route_list, LeptosRoutes}; -use sqlx::sqlite::{SqlitePoolOptions, SqliteConnectOptions}; -use std::str::FromStr; +use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; use std::net::SocketAddr; +use std::str::FromStr; use tower::ServiceBuilder; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index b441f6d1..7e34aad8 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -36,7 +36,7 @@ pub mod ssr_utils { // Simple JWT verification helper // In a real app, this should be shared with backend logic pub fn verify_token(token: &str) -> Result { - use jsonwebtoken::{decode, DecodingKey, Validation, Algorithm}; + use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; use serde::Deserialize; #[derive(Deserialize)] @@ -53,7 +53,8 @@ pub mod ssr_utils { token, &DecodingKey::from_secret(secret), &Validation::new(Algorithm::HS256), - ).map_err(|_| ServerFnError::new("Invalid token"))?; + ) + .map_err(|_| ServerFnError::new("Invalid token"))?; Ok(token_data.claims.sub) } @@ -116,7 +117,9 @@ pub async fn save_article(token: 
String, article: Article) -> Result<(), ServerF } // Sanitize slug just in case - let safe_slug = article.slug.chars() + let safe_slug = article + .slug + .chars() .filter(|c| c.is_alphanumeric() || *c == '-') .collect::() .to_lowercase(); @@ -173,7 +176,9 @@ pub async fn list_media(token: String) -> Result, ServerFnError> for line in stdout.lines() { let line = line.trim(); - if line.is_empty() || line.ends_with('/') { continue; } // Skip directories + if line.is_empty() || line.ends_with('/') { + continue; + } // Skip directories if let Some(path) = line.strip_prefix("gs://jakewray-portfolio/") { let name = path.split('/').last().unwrap_or(path).to_string(); @@ -188,17 +193,24 @@ pub async fn list_media(token: String) -> Result, ServerFnError> } #[server(UploadMedia, "/api")] -pub async fn upload_media(token: String, filename: String, data: Vec) -> Result { +pub async fn upload_media( + token: String, + filename: String, + data: Vec, +) -> Result { use self::ssr_utils::verify_token; - use std::process::{Command, Stdio}; use std::io::Write; + use std::process::{Command, Stdio}; verify_token(&token)?; // We'll upload to a 'uploads' folder for manual picking or sorting later let timestamp = chrono::Utc::now().timestamp(); let safe_name = format!("{}_{}", timestamp, filename.replace(" ", "_")); - let destination = format!("gs://jakewray-portfolio/media/journalism/uploads/{}", safe_name); + let destination = format!( + "gs://jakewray-portfolio/media/journalism/uploads/{}", + safe_name + ); let mut child = Command::new("gsutil") .arg("cp") @@ -216,5 +228,8 @@ pub async fn upload_media(token: String, filename: String, data: Vec) -> Res return Err(ServerFnError::new("Failed to upload to GCS")); } - Ok(format!("https://storage.googleapis.com/jakewray-portfolio/media/journalism/uploads/{}", safe_name)) + Ok(format!( + "https://storage.googleapis.com/jakewray-portfolio/media/journalism/uploads/{}", + safe_name + )) } diff --git a/frontend/src/components/media_picker.rs 
b/frontend/src/components/media_picker.rs index 1b39d3fa..26592a5e 100644 --- a/frontend/src/components/media_picker.rs +++ b/frontend/src/components/media_picker.rs @@ -1,17 +1,18 @@ -use leptos::prelude::*; use crate::api::articles::{list_media, upload_media, MediaItem}; -use leptos::task::spawn_local; use leptos::ev; -use web_sys::{HtmlInputElement, FileList}; +use leptos::prelude::*; +use leptos::task::spawn_local; use wasm_bindgen_futures::JsFuture; +use web_sys::{FileList, HtmlInputElement}; #[component] pub fn MediaPicker( token: Signal, on_select: F, - current_image: Option -) -> impl IntoView -where F: Fn(String) + 'static + Send + Sync + Clone + current_image: Option, +) -> impl IntoView +where + F: Fn(String) + 'static + Send + Sync + Clone, { let (items, set_items) = signal(Vec::::new()); let (loading, set_loading) = signal(true); @@ -36,7 +37,9 @@ where F: Fn(String) + 'static + Send + Sync + Clone // Initial fetch Effect::new({ let fetch = fetch_media.clone(); - move || { fetch(); } + move || { + fetch(); + } }); let on_upload = { @@ -52,7 +55,7 @@ where F: Fn(String) + 'static + Send + Sync + Clone let filename = file.name(); let file_clone = file.clone(); // web_sys::File is Clone (JsValue wrapper) set_uploading.set(true); - + spawn_local(async move { // Read file as bytes via web_sys let array_buffer_promise = file_clone.array_buffer(); @@ -60,14 +63,14 @@ where F: Fn(String) + 'static + Send + Sync + Clone Ok(array_buffer) => { let uint8_array = js_sys::Uint8Array::new(&array_buffer); let bytes = uint8_array.to_vec(); - + match upload_media(t, filename, bytes).await { Ok(_url) => { f_clone(); // Refresh list - }, + } Err(e) => set_error_msg.set(format!("Upload failed: {}", e)), } - }, + } Err(e) => set_error_msg.set(format!("File read failed: {:?}", e)), } set_uploading.set(false); @@ -77,8 +80,6 @@ where F: Fn(String) + 'static + Send + Sync + Clone } }; - - view! {
@@ -107,15 +108,15 @@ where F: Fn(String) + 'static + Send + Sync + Clone } else { let on_select = on_select.clone(); let current_img = current_image.clone(); - + items.get().into_iter().map(move |item| { let url = item.url.clone(); let is_selected = current_img.as_ref() == Some(&url); let os = on_select.clone(); let u = url.clone(); - + view! { -
&'static [JournalismArticle] { } pub fn find_article(slug: &str) -> Option { - ARTICLES.iter().find(|article| article.slug == slug).cloned() + ARTICLES + .iter() + .find(|article| article.slug == slug) + .cloned() } diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 3d51d373..5727fe05 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -1,6 +1,6 @@ +pub mod api; mod app; pub mod components; -pub mod api; pub mod data; pub mod pages; pub use app::*; diff --git a/frontend/src/pages/about.rs b/frontend/src/pages/about.rs index 8cbc175f..51a0e5b2 100644 --- a/frontend/src/pages/about.rs +++ b/frontend/src/pages/about.rs @@ -5,7 +5,7 @@ pub fn AboutPage() -> impl IntoView { view! {

"About Me"

- +

"I am a journalist, developer, and photographer based in Northern British Columbia. I have a passion for uncovering stories that matter and documenting the world around me through both words and images." diff --git a/frontend/src/pages/admin/login.rs b/frontend/src/pages/admin/login.rs index 849333bd..8d60ff0c 100644 --- a/frontend/src/pages/admin/login.rs +++ b/frontend/src/pages/admin/login.rs @@ -49,7 +49,9 @@ pub fn AdminLoginPage() -> impl IntoView { let password_val = _password.get(); let navigate = navigate.clone(); - web_sys::console::log_1(&format!("[Login] Attempting login for user: {}", username_val).into()); + web_sys::console::log_1( + &format!("[Login] Attempting login for user: {}", username_val).into(), + ); spawn_local(async move { let req = LoginRequest { @@ -64,31 +66,36 @@ pub fn AdminLoginPage() -> impl IntoView { .header("Content-Type", "application/json") .json(&req) .map_err(|e| { - web_sys::console::log_1(&format!("[Login] Serialize error: {:?}", e).into()); + web_sys::console::log_1( + &format!("[Login] Serialize error: {:?}", e).into(), + ); "Failed to serialize request".to_string() })? 
.send() .await .map_err(|e| { - web_sys::console::log_1(&format!("[Login] Network error: {:?}", e).into()); + web_sys::console::log_1( + &format!("[Login] Network error: {:?}", e).into(), + ); "Failed to connect to server".to_string() })?; - web_sys::console::log_1(&format!("[Login] Response status: {}", resp.status()).into()); + web_sys::console::log_1( + &format!("[Login] Response status: {}", resp.status()).into(), + ); if !resp.ok() { return Err("Invalid username or password".to_string()); } - let data: LoginResponse = resp - .json() - .await - .map_err(|e| { - web_sys::console::log_1(&format!("[Login] Parse error: {:?}", e).into()); - "Failed to parse response".to_string() - })?; + let data: LoginResponse = resp.json().await.map_err(|e| { + web_sys::console::log_1(&format!("[Login] Parse error: {:?}", e).into()); + "Failed to parse response".to_string() + })?; - web_sys::console::log_1(&"[Login] Token received, storing in localStorage".into()); + web_sys::console::log_1( + &"[Login] Token received, storing in localStorage".into(), + ); // Store token in localStorage let window = web_sys::window().unwrap(); @@ -103,7 +110,7 @@ pub fn AdminLoginPage() -> impl IntoView { Ok(()) => { web_sys::console::log_1(&"[Login] Success, navigating to dashboard".into()); navigate("/admin/dashboard", Default::default()) - }, + } Err(msg) => { web_sys::console::log_1(&format!("[Login] Error: {}", msg).into()); set_error.set(msg); diff --git a/frontend/src/pages/admin/password_change.rs b/frontend/src/pages/admin/password_change.rs index 95964f92..94e1f486 100644 --- a/frontend/src/pages/admin/password_change.rs +++ b/frontend/src/pages/admin/password_change.rs @@ -97,12 +97,12 @@ pub fn AdminPasswordChange() -> impl IntoView { // Let me check `backend/src/api/mod.rs`. // If `login.rs` works with `/admin/login`, then the backend MUST be serving it there. // I will check `backend/src/api/mod.rs` to see if it nests `admin::router`. 
- .header("Authorization", &format!("Bearer {}", token)) - .header("Content-Type", "application/json") - .json(&req) - .unwrap() - .send() - .await; + .header("Authorization", &format!("Bearer {}", token)) + .header("Content-Type", "application/json") + .json(&req) + .unwrap() + .send() + .await; match resp { Ok(r) => { @@ -112,12 +112,15 @@ pub fn AdminPasswordChange() -> impl IntoView { set_new_password.set("".to_string()); set_confirm_password.set("".to_string()); } else { - let text = r.text().await.unwrap_or_else(|_| "Unknown error".to_string()); + let text = r + .text() + .await + .unwrap_or_else(|_| "Unknown error".to_string()); set_error.set(format!("Error: {}", text)); } } Err(e) => { - set_error.set(format!("Network error: {}", e)); + set_error.set(format!("Network error: {}", e)); } } set_loading.set(false); diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index fdfc7701..b556544c 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -1,11 +1,11 @@ // use crate::data::journalism; // Deprecated use crate::api::articles::{get_articles, Article}; +use crate::components::media_picker::MediaPicker; use leptos::prelude::*; -use leptos_router::hooks::use_params_map; use leptos::task::spawn_local; -use leptos_router::components::A; -use crate::components::media_picker::MediaPicker; use leptos::wasm_bindgen::JsCast; +use leptos_router::components::A; +use leptos_router::hooks::use_params_map; fn strip_tags(s: &str) -> String { let mut out = String::with_capacity(s.len()); @@ -14,7 +14,11 @@ fn strip_tags(s: &str) -> String { match ch { '<' => in_tag = true, '>' => in_tag = false, - _ => if !in_tag { out.push(ch) }, + _ => { + if !in_tag { + out.push(ch) + } + } } } out.trim().to_string() @@ -23,9 +27,27 @@ fn strip_tags(s: &str) -> String { fn starts_with_month(s: &str) -> bool { let sm = s.trim_start(); const MONTHS: [&str; 21] = [ - "Jan.", "January", "Feb.", "February", "Mar.", "March", "Apr.", "April", 
- "May", "June", "July", "Aug.", "August", "Sept.", "September", "Oct.", - "October", "Nov.", "November", "Dec.", "December", + "Jan.", + "January", + "Feb.", + "February", + "Mar.", + "March", + "Apr.", + "April", + "May", + "June", + "July", + "Aug.", + "August", + "Sept.", + "September", + "Oct.", + "October", + "Nov.", + "November", + "Dec.", + "December", ]; MONTHS.iter().any(|m| { if sm.starts_with(m) { @@ -38,11 +60,19 @@ fn starts_with_month(s: &str) -> bool { }) } -fn extract_between(haystack: &str, start_pat: &str, end_pat: &str, from: usize) -> Option<(String, usize)> { +fn extract_between( + haystack: &str, + start_pat: &str, + end_pat: &str, + from: usize, +) -> Option<(String, usize)> { let start_idx = haystack[from..].find(start_pat)? + from; let after = start_idx + start_pat.len(); let end_idx = haystack[after..].find(end_pat)? + after; - Some((haystack[after..end_idx].to_string(), end_idx + end_pat.len())) + Some(( + haystack[after..end_idx].to_string(), + end_idx + end_pat.len(), + )) } #[allow(dead_code)] @@ -61,9 +91,13 @@ fn extract_printed_date(html: &str) -> Option { if let Some((p_inner, next)) = extract_between(html, "", pos) { let open_end = p_inner.find('>').map(|i| i + 1).unwrap_or(0); let text = strip_tags(&p_inner[open_end..]); - if starts_with_month(&text) { return Some(text); } + if starts_with_month(&text) { + return Some(text); + } pos = next; - } else { break; } + } else { + break; + } } None } @@ -88,7 +122,7 @@ fn extract_body_preview(html: &str) -> Option { #[allow(dead_code)] fn replace_date_paragraph(html: &str, new_date: &str) -> String { // Reuse extract logic to find the range, then replace it - let after_h4 = html.find("").map(|idx| idx + 5).unwrap_or(0); + let after_h4 = html.find("").map(|idx| idx + 5).unwrap_or(0); let mut pos = after_h4; for _ in 0..5 { if let Some((p_inner, next)) = extract_between(html, "", pos) { @@ -97,19 +131,24 @@ fn replace_date_paragraph(html: &str, new_date: &str) -> String { if 
starts_with_month(&text) {
                 if let Some(start_rel) = html[pos..].find("<p") {
                     let start_abs = pos + start_rel;
                     let after_start = start_abs;
                     if let Some(end_rel) = html[after_start..].find("</p>") {
-                    let end_abs = after_start + end_rel + 4; // "</p>" len
-                    let mut out = html.to_string();
-                    // Construct replacement paragraph
-                    let replacement = format!("<p>{}</p>", new_date);
-                    out.replace_range(start_abs..end_abs, &replacement);
-                    return out;
+                    let end_abs = after_start + end_rel + 4; // "</p>" len
+                    let mut out = html.to_string();
+                    // Construct replacement paragraph
+                    let replacement = format!(
+                        "<p>{}</p>",
+                        new_date
+                    );
+                    out.replace_range(start_abs..end_abs, &replacement);
+                    return out;
use_params_map(); let slug = move || params.with(|p| p.get("slug").map(|s| s.to_string()).unwrap_or_default()); @@ -316,12 +366,12 @@ pub fn JournalismArticlePage() -> impl IntoView { web_sys::console::log_1(&"Checking auth token...".into()); if let Ok(Some(storage)) = web_sys::window().unwrap().local_storage() { if let Ok(Some(t)) = storage.get_item("admin_token") { - web_sys::console::log_1(&format!("Found token: {}", t).into()); - if !t.is_empty() { + web_sys::console::log_1(&format!("Found token: {}", t).into()); + if !t.is_empty() { _set_token.set(t); _set_is_admin.set(true); web_sys::console::log_1(&"Admin mode enabled".into()); - } + } } else { web_sys::console::log_1(&"No token found in localStorage".into()); } @@ -361,7 +411,11 @@ pub fn JournalismArticlePage() -> impl IntoView { new_article.title = edit_title.get(); new_article.display_date = edit_date.get(); new_article.byline = Some(edit_byline.get()); - new_article.captions = if edit_caption.get().trim().is_empty() { vec![] } else { vec![edit_caption.get()] }; + new_article.captions = if edit_caption.get().trim().is_empty() { + vec![] + } else { + vec![edit_caption.get()] + }; new_article.images = edit_images.get(); new_article.content_html = edit_html.get(); @@ -370,7 +424,7 @@ pub fn JournalismArticlePage() -> impl IntoView { set_save_status.set("Saved!".to_string()); set_is_editing.set(false); article_resource.refetch(); - }, + } Err(e) => set_save_status.set(format!("Error: {}", e)), } }); @@ -379,7 +433,11 @@ pub fn JournalismArticlePage() -> impl IntoView { let on_delete = move |slug: String| { #[cfg(target_arch = "wasm32")] { - if !web_sys::window().unwrap().confirm_with_message("Are you sure you want to delete this article?").unwrap() { + if !web_sys::window() + .unwrap() + .confirm_with_message("Are you sure you want to delete this article?") + .unwrap() + { return; } } @@ -390,10 +448,12 @@ pub fn JournalismArticlePage() -> impl IntoView { Ok(_) => { let navigate = 
leptos_router::hooks::use_navigate(); navigate("/journalism", Default::default()); - }, + } Err(e) => { #[cfg(target_arch = "wasm32")] - let _ = web_sys::window().unwrap().alert_with_message(&format!("Error deleting: {}", e)); + let _ = web_sys::window() + .unwrap() + .alert_with_message(&format!("Error deleting: {}", e)); #[cfg(not(target_arch = "wasm32"))] leptos::logging::error!("Error deleting: {}", e); } diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index c73e1242..f011a82e 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -86,3 +86,32 @@ CREATE TABLE projects ( is_featured BOOLEAN DEFAULT FALSE, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); + +-- Triggers for UUID generation on primary keys +CREATE TRIGGER set_users_id AFTER INSERT ON users +WHEN NEW.id IS NULL +BEGIN UPDATE users SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_articles_id AFTER INSERT ON articles +WHEN NEW.id IS NULL +BEGIN UPDATE articles SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_blog_posts_id AFTER INSERT ON blog_posts +WHEN NEW.id IS NULL +BEGIN UPDATE blog_posts SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_creative_works_id AFTER INSERT ON creative_works +WHEN NEW.id IS NULL +BEGIN UPDATE creative_works SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_media_items_id AFTER INSERT ON media_items +WHEN NEW.id IS NULL +BEGIN UPDATE media_items SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_music_tracks_id AFTER INSERT ON music_tracks +WHEN NEW.id IS NULL +BEGIN UPDATE music_tracks SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; + +CREATE TRIGGER set_projects_id AFTER INSERT ON projects +WHEN NEW.id IS NULL +BEGIN 
UPDATE projects SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; From e1ef11c37ec7bae692bc31eb5a8df41a1063df80 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 13:04:21 -0700 Subject: [PATCH 11/64] refactor: improve backend API error handling, optimize SQLite connections, and refine frontend code and Docker configurations. --- backend/src/api/public.rs | 17 +++-- backend/src/main.rs | 2 +- docker-compose.prod.yaml | 2 +- docker-compose.yaml | 6 +- frontend/src/api/articles.rs | 4 +- frontend/src/app.rs | 1 - frontend/src/components/media_picker.rs | 84 +++++++++++-------------- frontend/src/pages/sections.rs | 23 +++---- scripts/setup-dev.sh | 3 +- 9 files changed, 67 insertions(+), 75 deletions(-) diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 18834fe4..44896eb8 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -15,7 +15,9 @@ async fn health_check() -> &'static str { use sqlx::Row; -async fn list_articles(State(pool): State) -> Json> { +async fn list_articles( + State(pool): State, +) -> Result>, axum::http::StatusCode> { match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT 20") .map(|row: sqlx::sqlite::SqliteRow| { let origin_str: String = row.get("origin"); @@ -41,15 +43,18 @@ async fn list_articles(State(pool): State) -> Json> { .fetch_all(&pool) .await { - Ok(articles) => Json(articles), + Ok(articles) => Ok(Json(articles)), Err(e) => { tracing::error!("Failed to fetch articles: {}", e); - Json(Vec::new()) + Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR) } } } -async fn list_blog_posts(State(pool): State) -> Json> { +#[allow(dead_code)] +async fn list_blog_posts( + State(pool): State, +) -> Result>, axum::http::StatusCode> { match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") .map(|row: 
sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); @@ -72,10 +77,10 @@ async fn list_blog_posts(State(pool): State) -> Json> .fetch_all(&pool) .await { - Ok(posts) => Json(posts), + Ok(posts) => Ok(Json(posts)), Err(e) => { tracing::error!("Failed to fetch blog posts: {}", e); - Json(Vec::new()) + Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR) } } } diff --git a/backend/src/main.rs b/backend/src/main.rs index 603a9e83..360d381f 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -47,7 +47,7 @@ async fn main() -> Result<(), Box> { .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(5) + .max_connections(1) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/docker-compose.prod.yaml b/docker-compose.prod.yaml index 35d44e0f..5ba32dce 100644 --- a/docker-compose.prod.yaml +++ b/docker-compose.prod.yaml @@ -3,7 +3,7 @@ services: build: . restart: always environment: - - DATABASE_URL=sqlite://data/sqlite.db + - DATABASE_URL=sqlite:////app/data/sqlite.db - LEPTOS_SITE_ADDR=0.0.0.0:3000 - RUST_LOG=info networks: diff --git a/docker-compose.yaml b/docker-compose.yaml index cecb119c..cd2d32eb 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,5 +1,7 @@ version: "3.8" -# services: - # The web and backend services will now use a local SQLite DB file. +services: + db_stub: + image: alpine:latest + command: echo "SQLite used directly, no db service required." 
diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index 7e34aad8..b7371682 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -71,7 +71,7 @@ pub async fn get_articles() -> Result, ServerFnError> { if let Ok(entries) = fs::read_dir(dir) { for entry in entries.flatten() { let path = entry.path(); - if path.extension().map_or(false, |ext| ext == "json") { + if path.extension().is_some_and(|ext| ext == "json") { if let Ok(content) = fs::read_to_string(&path) { if let Ok(article) = serde_json::from_str::
(&content) { articles.push(article); @@ -181,7 +181,7 @@ pub async fn list_media(token: String) -> Result, ServerFnError> } // Skip directories if let Some(path) = line.strip_prefix("gs://jakewray-portfolio/") { - let name = path.split('/').last().unwrap_or(path).to_string(); + let name = path.split('/').next_back().unwrap_or(path).to_string(); items.push(MediaItem { url: format!("{}/{}", base_url, path), name, diff --git a/frontend/src/app.rs b/frontend/src/app.rs index 7e8727a3..a2c95873 100644 --- a/frontend/src/app.rs +++ b/frontend/src/app.rs @@ -101,7 +101,6 @@ fn AdminRedirect() -> impl IntoView { leptos::prelude::Effect::new(move || { navigate("/admin/login", Default::default()); }); - view! {} } #[component] diff --git a/frontend/src/components/media_picker.rs b/frontend/src/components/media_picker.rs index 26592a5e..60d9ddce 100644 --- a/frontend/src/components/media_picker.rs +++ b/frontend/src/components/media_picker.rs @@ -19,63 +19,53 @@ where let (uploading, set_uploading) = signal(false); let (error_msg, set_error_msg) = signal(String::new()); - let fetch_media = { - let token = token.clone(); - move || { - set_loading.set(true); - let t = token.get(); - spawn_local(async move { - match list_media(t).await { - Ok(res) => set_items.set(res), - Err(e) => set_error_msg.set(format!("Error: {}", e)), - } - set_loading.set(false); - }); - } + let fetch_media = move || { + set_loading.set(true); + let t = token.get(); + spawn_local(async move { + match list_media(t).await { + Ok(res) => set_items.set(res), + Err(e) => set_error_msg.set(format!("Error: {}", e)), + } + set_loading.set(false); + }); }; // Initial fetch - Effect::new({ - let fetch = fetch_media.clone(); - move || { - fetch(); - } + Effect::new(move || { + fetch_media(); }); - let on_upload = { - let token = token.clone(); - let fetch = fetch_media.clone(); - move |ev: ev::Event| { - let input: HtmlInputElement = event_target(&ev); - let files: Option = input.files(); - if let Some(files) = 
files { - if let Some(file) = files.get(0) { - let t = token.get(); - let f_clone = fetch.clone(); - let filename = file.name(); - let file_clone = file.clone(); // web_sys::File is Clone (JsValue wrapper) - set_uploading.set(true); + let on_upload = move |ev: ev::Event| { + let input: HtmlInputElement = event_target(&ev); + let files: Option = input.files(); + if let Some(files) = files { + if let Some(file) = files.get(0) { + let t = token.get(); + let f_clone = fetch_media; + let filename = file.name(); + let file_clone = file.clone(); // web_sys::File is Clone (JsValue wrapper) + set_uploading.set(true); - spawn_local(async move { - // Read file as bytes via web_sys - let array_buffer_promise = file_clone.array_buffer(); - match JsFuture::from(array_buffer_promise).await { - Ok(array_buffer) => { - let uint8_array = js_sys::Uint8Array::new(&array_buffer); - let bytes = uint8_array.to_vec(); + spawn_local(async move { + // Read file as bytes via web_sys + let array_buffer_promise = file_clone.array_buffer(); + match JsFuture::from(array_buffer_promise).await { + Ok(array_buffer) => { + let uint8_array = js_sys::Uint8Array::new(&array_buffer); + let bytes = uint8_array.to_vec(); - match upload_media(t, filename, bytes).await { - Ok(_url) => { - f_clone(); // Refresh list - } - Err(e) => set_error_msg.set(format!("Upload failed: {}", e)), + match upload_media(t, filename, bytes).await { + Ok(_url) => { + f_clone(); // Refresh list } + Err(e) => set_error_msg.set(format!("Upload failed: {}", e)), } - Err(e) => set_error_msg.set(format!("File read failed: {:?}", e)), } - set_uploading.set(false); - }); - } + Err(e) => set_error_msg.set(format!("File read failed: {:?}", e)), + } + set_uploading.set(false); + }); } } }; diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index b556544c..31c7e3c9 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -50,10 +50,9 @@ fn starts_with_month(s: &str) -> bool { 
"December", ]; MONTHS.iter().any(|m| { - if sm.starts_with(m) { - let after = &sm[m.len()..]; + if let Some(after) = sm.strip_prefix(m) { // Match if it's the end of string or next char is not a letter - after.chars().next().map_or(true, |c| !c.is_alphabetic()) + after.chars().next().is_none_or(|c| !c.is_alphabetic()) } else { false } @@ -213,11 +212,9 @@ fn linkify_images(html: &str) -> String { // Extract src let src = if let Some(src_start_rel) = img_tag.find("src=\"") { let after_src = src_start_rel + 5; - if let Some(src_end_rel) = img_tag[after_src..].find('"') { - Some(&img_tag[after_src..after_src + src_end_rel]) - } else { - None - } + img_tag[after_src..] + .find('"') + .map(|src_end_rel| &img_tag[after_src..after_src + src_end_rel]) } else { None }; @@ -279,15 +276,13 @@ fn italicize_origin_line(html: &str) -> String { } fn format_cp_style(date: &str) -> String { - let date = date - .replace("January", "Jan.") + date.replace("January", "Jan.") .replace("February", "Feb.") .replace("August", "Aug.") .replace("September", "Sept.") .replace("October", "Oct.") .replace("November", "Nov.") - .replace("December", "Dec."); - date + .replace("December", "Dec.") } #[component] @@ -312,7 +307,7 @@ pub fn JournalismPage() -> impl IntoView { let title = article.title.clone(); let preview_text = extract_body_preview(&article.content_html) .unwrap_or_else(|| article.excerpt.clone()); - let image = article.images.get(0).cloned(); + let image = article.images.first().cloned(); let thumb_src = image.clone().unwrap_or_else(|| "data:image/svg+xml;utf8,Image coming soon".to_string()); let date = extract_printed_date(&article.content_html) .unwrap_or_else(|| article.display_date.clone()); @@ -354,7 +349,7 @@ pub fn JournalismArticlePage() -> impl IntoView { let params = use_params_map(); let slug = move || params.with(|p| p.get("slug").map(|s| s.to_string()).unwrap_or_default()); - let article_resource = Resource::new(slug, |s| get_article(s)); + let article_resource = 
Resource::new(slug, get_article); // Auth State let (is_admin, _set_is_admin) = signal(false); diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 835dd9a0..64ae8461 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -64,8 +64,9 @@ echo "👤 Creating default admin user..." # Note: If uuidgen is unavailable, a hardcoded fallback UUID is used. # This results in shared admin IDs across affected dev installs, which is acceptable for dev environments. ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") +SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') sqlite3 sqlite.db < Date: Thu, 19 Mar 2026 13:10:30 -0700 Subject: [PATCH 12/64] fix: Correct UUID generation and parsing in migrations, scripts, and API queries, remove docker-compose stub, and add dev setup validation. --- README.md | 4 +++- backend/src/api/public.rs | 20 +++++++++++-------- docker-compose.yaml | 7 ------- .../20260210120000_seed_admin_password.sql | 2 +- scripts/setup-dev.sh | 6 ++++++ 5 files changed, 22 insertions(+), 17 deletions(-) delete mode 100644 docker-compose.yaml diff --git a/README.md b/README.md index 4506a5eb..4364797f 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ For first-time SSL setup on the server: - **Reverse Proxy**: Nginx with Let's Encrypt SSL - **Deployment**: Docker Compose +### Known Limitations +- **Database Concurrency**: The application uses embedded SQLite in WAL mode with a small connection pool (`max_connections(5)`). SQLite only allows one concurrent writer. Concurrent write bursts will queue (up to a 5s busy timeout) and could fail under heavy write load. This is acceptable for a personal blog/portfolio, but must be accounted for if write traffic scales. 
+ ## Development ### Quick Start with Nix (Recommended) @@ -49,7 +52,6 @@ cargo leptos watch - `backend/` - Server-side Rust code - `frontend/` - Client-side Leptos components - `shared/` - Shared types and utilities -- `migration/` - Database migration tools - `flake.nix` - Nix development environment - `.envrc` - direnv configuration diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 44896eb8..2e257cf7 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -19,15 +19,17 @@ async fn list_articles( State(pool): State, ) -> Result>, axum::http::StatusCode> { match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT 20") - .map(|row: sqlx::sqlite::SqliteRow| { + .try_map(|row: sqlx::sqlite::SqliteRow| { let origin_str: String = row.get("origin"); let origin = match origin_str.as_str() { "imported" => shared::Origin::Imported, "synced" => shared::Origin::Synced, _ => shared::Origin::Local, }; - Article { - id: row.get::("id"), + let id_str: String = row.get("id"); + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(Article { + id, wp_id: row.get("wp_id"), slug: row.get("slug"), title: row.get("title"), @@ -38,7 +40,7 @@ async fn list_articles( author: row.get("author"), published_at: row.get("published_at"), origin, - } + }) }) .fetch_all(&pool) .await @@ -56,7 +58,7 @@ async fn list_blog_posts( State(pool): State, ) -> Result>, axum::http::StatusCode> { match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") - .map(|row: sqlx::sqlite::SqliteRow| { + .try_map(|row: sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); let tags = tags_str.and_then(|s| match serde_json::from_str(&s) { Ok(t) => Some(t), @@ -65,14 +67,16 @@ async fn list_blog_posts( None } }); - BlogPost { - id: row.get::("id"), + let id_str: 
String = row.get("id"); + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(BlogPost { + id, slug: row.get("slug"), title: row.get("title"), content: row.get("content"), published_at: row.get("published_at"), tags, - } + }) }) .fetch_all(&pool) .await diff --git a/docker-compose.yaml b/docker-compose.yaml deleted file mode 100644 index cd2d32eb..00000000 --- a/docker-compose.yaml +++ /dev/null @@ -1,7 +0,0 @@ -version: "3.8" - -services: - db_stub: - image: alpine:latest - command: echo "SQLite used directly, no db service required." - diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index e1e67017..943a56e0 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -2,7 +2,7 @@ -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) VALUES ( - lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-a' || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6))), + lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(1) & X'3f' | X'80')) || lower(hex(randomblob(1))) || '-' || lower(hex(randomblob(6))), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI' ) ON CONFLICT (username) diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 64ae8461..90a6a166 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -65,6 +65,12 @@ echo "👤 Creating default admin user..." # This results in shared admin IDs across affected dev installs, which is acceptable for dev environments. ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') + +if ! 
[[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$ ]]; then + echo "❌ Invalid Admin UUID format generated: $SAFE_UUID" + exit 1 +fi + sqlite3 sqlite.db < Date: Thu, 19 Mar 2026 13:23:14 -0700 Subject: [PATCH 13/64] feat: Centralize UUID generation in application logic, remove database triggers, and enforce JWT secret via environment variables. --- .envrc | 2 +- backend/src/api/admin.rs | 20 +++++++-------- backend/src/api/public.rs | 14 +++++----- migrations/20260110000000_initial_schema.sql | 27 -------------------- scripts/setup-dev.sh | 5 ++-- 5 files changed, 20 insertions(+), 48 deletions(-) diff --git a/.envrc b/.envrc index c739b755..d0a23f02 100644 --- a/.envrc +++ b/.envrc @@ -3,7 +3,7 @@ use flake # Load environment variables export RUST_LOG=info -export DATABASE_URL="sqlite:sqlite.db" +export DATABASE_URL="sqlite://sqlite.db" export RUST_BACKTRACE=1 # Optional: Print helpful info when entering directory diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index c0e74738..389701e1 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -20,10 +20,10 @@ use jsonwebtoken::{encode, EncodingKey, Header}; use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; -fn get_jwt_secret() -> &'static [u8] { - // In production, use environment variable: std::env::var("JWT_SECRET").unwrap_or_default().as_bytes() - // For now using a default that should be changed - b"change-this-secret-key-in-production-environment" +fn get_jwt_secret() -> Vec { + std::env::var("JWT_SECRET") + .expect("JWT_SECRET environment variable must be set") + .into_bytes() } #[derive(Serialize, Deserialize)] @@ -51,7 +51,7 @@ pub struct ChangePasswordRequest { #[derive(sqlx::FromRow)] struct UserRow { - id: uuid::Uuid, + id: String, password_hash: String, } @@ -148,14 +148,14 @@ async fn login( let exp = (Utc::now() + Duration::hours(24)).timestamp() as usize; let claims = Claims { - sub: user.id.to_string(), + sub: 
user.id.clone(), exp, }; let token = encode( &Header::default(), &claims, - &EncodingKey::from_secret(get_jwt_secret()), + &EncodingKey::from_secret(&get_jwt_secret()), ) .map_err(|_| { ( @@ -203,7 +203,7 @@ async fn change_password( let validation = jsonwebtoken::Validation::default(); let token_data = jsonwebtoken::decode::( token, - &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), + &jsonwebtoken::DecodingKey::from_secret(&get_jwt_secret()), &validation, ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; @@ -217,7 +217,7 @@ async fn change_password( // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(user_id) + .bind(user_id.to_string()) .fetch_optional(&pool) .await .map_err(|_| { @@ -246,7 +246,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? WHERE id = ?") .bind(new_hash) - .bind(user_id) + .bind(user_id.to_string()) .execute(&pool) .await .map_err(|_| { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 2e257cf7..eeef5cab 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -60,13 +60,13 @@ async fn list_blog_posts( match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") .try_map(|row: sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); - let tags = tags_str.and_then(|s| match serde_json::from_str(&s) { - Ok(t) => Some(t), - Err(e) => { - tracing::warn!("Failed to deserialize tags: {}", e); - None - } - }); + let tags = match tags_str { + Some(s) => match serde_json::from_str(&s) { + Ok(t) => Some(t), + Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), + }, + None => None, + }; let id_str: String = row.get("id"); let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; Ok(BlogPost { diff --git a/migrations/20260110000000_initial_schema.sql 
b/migrations/20260110000000_initial_schema.sql index f011a82e..34f6e401 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -87,31 +87,4 @@ CREATE TABLE projects ( created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); --- Triggers for UUID generation on primary keys -CREATE TRIGGER set_users_id AFTER INSERT ON users -WHEN NEW.id IS NULL -BEGIN UPDATE users SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; -CREATE TRIGGER set_articles_id AFTER INSERT ON articles -WHEN NEW.id IS NULL -BEGIN UPDATE articles SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; - -CREATE TRIGGER set_blog_posts_id AFTER INSERT ON blog_posts -WHEN NEW.id IS NULL -BEGIN UPDATE blog_posts SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; - -CREATE TRIGGER set_creative_works_id AFTER INSERT ON creative_works -WHEN NEW.id IS NULL -BEGIN UPDATE creative_works SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; - -CREATE TRIGGER set_media_items_id AFTER INSERT ON media_items -WHEN NEW.id IS NULL -BEGIN UPDATE media_items SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; - -CREATE TRIGGER set_music_tracks_id AFTER INSERT ON music_tracks -WHEN NEW.id IS NULL -BEGIN UPDATE music_tracks SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; - -CREATE TRIGGER set_projects_id AFTER INSERT ON projects -WHEN NEW.id IS NULL -BEGIN UPDATE projects SET id = lower(hex(randomblob(16))) WHERE rowid = NEW.rowid; END; diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 90a6a166..30dc54b2 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -61,9 +61,8 @@ cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" echo "👤 Creating default admin user..." -# Note: If uuidgen is unavailable, a hardcoded fallback UUID is used. 
-# This results in shared admin IDs across affected dev installs, which is acceptable for dev environments. -ADMIN_UUID=$(uuidgen 2>/dev/null || echo "dfbfb952-b8ec-4bd8-b1aa-ed154109addf") +# Fall back to the kernel's random UUID or Python's uuid module if uuidgen is missing +ADMIN_UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c 'import uuid; print(uuid.uuid4())' 2>/dev/null || { echo "❌ Could not generate a UUID. Please install uuidgen."; exit 1; }) SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$ ]]; then From 80aa5b3233e8126d312d45e3531ff1a7d21b6f25 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 13:57:55 -0700 Subject: [PATCH 14/64] refactor: Manage JWT secret with `OnceLock`, sanitize uploaded filenames, validate JWT in the `me` endpoint, increase database connection pool size, and update admin seed ID generation. --- backend/src/api/admin.rs | 33 +++++++++++++++---- backend/src/api/mod.rs | 2 +- backend/src/main.rs | 5 ++- frontend/src/api/articles.rs | 9 ++++- .../20260210120000_seed_admin_password.sql | 2 +- 5 files changed, 40 insertions(+), 11 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 389701e1..4f525a51 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -20,10 +20,21 @@ use jsonwebtoken::{encode, EncodingKey, Header}; use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; -fn get_jwt_secret() -> Vec { - std::env::var("JWT_SECRET") +use std::sync::OnceLock; + +static JWT_SECRET: OnceLock> = OnceLock::new(); + +pub fn init_jwt_secret() { + let secret = std::env::var("JWT_SECRET") .expect("JWT_SECRET environment variable must be set") - .into_bytes() + .into_bytes(); + JWT_SECRET + .set(secret) + .expect("JWT_SECRET initialized twice"); +} + +fn get_jwt_secret() -> &'static [u8] { + JWT_SECRET.get().expect("JWT_SECRET not initialized") }
#[derive(Serialize, Deserialize)] @@ -97,7 +108,7 @@ async fn login( .get(header::ACCEPT) .and_then(|v| v.to_str().ok()) .unwrap_or(""); - let bytes = to_bytes(body, 64 * 1024) + let bytes = to_bytes(body, 16 * 1024) .await .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; @@ -155,7 +166,7 @@ async fn login( let token = encode( &Header::default(), &claims, - &EncodingKey::from_secret(&get_jwt_secret()), + &EncodingKey::from_secret(get_jwt_secret()), ) .map_err(|_| { ( @@ -179,12 +190,20 @@ async fn login( } async fn me(headers: HeaderMap) -> Result<&'static str, StatusCode> { - headers + let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) .and_then(|s| s.strip_prefix("Bearer ")) .ok_or(StatusCode::UNAUTHORIZED)?; + let validation = jsonwebtoken::Validation::default(); + jsonwebtoken::decode::( + token, + &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), + &validation, + ) + .map_err(|_| StatusCode::UNAUTHORIZED)?; + Ok("Authenticated") } @@ -203,7 +222,7 @@ async fn change_password( let validation = jsonwebtoken::Validation::default(); let token_data = jsonwebtoken::decode::( token, - &jsonwebtoken::DecodingKey::from_secret(&get_jwt_secret()), + &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), &validation, ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index 24e0ee32..84c5652c 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -1,6 +1,6 @@ use axum::Router; -mod admin; +pub mod admin; mod public; pub fn router(state: crate::state::AppState) -> Router { diff --git a/backend/src/main.rs b/backend/src/main.rs index 360d381f..bc0071c3 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -35,6 +35,9 @@ async fn main() -> Result<(), Box> { .with(tracing_subscriber::fmt::layer()) .init(); + // Initialize JWT Secret early so it panics at startup if missing + api::admin::init_jwt_secret(); + // 
Improved error handling for DATABASE_URL let database_url = std::env::var("DATABASE_URL") .map_err(|_| "DATABASE_URL environment variable must be set")?; @@ -47,7 +50,7 @@ async fn main() -> Result<(), Box> { .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(1) + .max_connections(8) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index b7371682..f3bac3a8 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -206,7 +206,14 @@ pub async fn upload_media( // We'll upload to a 'uploads' folder for manual picking or sorting later let timestamp = chrono::Utc::now().timestamp(); - let safe_name = format!("{}_{}", timestamp, filename.replace(" ", "_")); + let safe_name = format!( + "{}_{}", + timestamp, + filename + .chars() + .filter(|c| c.is_alphanumeric() || *c == '.' || *c == '-' || *c == '_') + .collect::() + ); let destination = format!( "gs://jakewray-portfolio/media/journalism/uploads/{}", safe_name diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index 943a56e0..ae41a508 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -2,7 +2,7 @@ -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) VALUES ( - lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(1) & X'3f' | X'80')) || lower(hex(randomblob(1))) || '-' || lower(hex(randomblob(6))), + lower(hex(randomblob(16))), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI' ) ON CONFLICT (username) From d7f9c87b20d245820d889cd55e85922867ffc0ed Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 
14:41:14 -0700 Subject: [PATCH 15/64] feat: Implement blog post listing API, enhance security with environment-based JWT secret and timing attack prevention, and refine deployment with data directory setup and UUID generation. --- backend/src/api/admin.rs | 6 +++++- backend/src/api/public.rs | 2 +- frontend/src/api/articles.rs | 7 ++++--- migrations/20260210120000_seed_admin_password.sql | 5 ++++- scripts/deploy.sh | 3 ++- 5 files changed, 16 insertions(+), 7 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 4f525a51..586983c3 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -141,7 +141,11 @@ async fn login( let is_invalid = match user { Some(ref u) => !verify_password(&req.password, &u.password_hash), - None => true, + None => { + static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI"; + let _ = verify_password(&req.password, DUMMY_HASH); + true + } }; if is_invalid { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index eeef5cab..4ebf5975 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -6,6 +6,7 @@ pub fn router(state: crate::state::AppState) -> Router { Router::new() .route("/health", get(health_check)) .route("/api/articles", get(list_articles)) + .route("/api/blog", get(list_blog_posts)) .with_state(state) } @@ -53,7 +54,6 @@ async fn list_articles( } } -#[allow(dead_code)] async fn list_blog_posts( State(pool): State, ) -> Result>, axum::http::StatusCode> { diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index f3bac3a8..9b146a68 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -46,12 +46,13 @@ pub mod ssr_utils { } // WARN: Synchronize this secret with backend/src/api/admin.rs - // Ideally, use an ENV var. 
- let secret = b"change-this-secret-key-in-production-environment"; + let secret = std::env::var("JWT_SECRET") + .expect("JWT_SECRET environment variable must be set") + .into_bytes(); let token_data = decode::( token, - &DecodingKey::from_secret(secret), + &DecodingKey::from_secret(&secret), &Validation::new(Algorithm::HS256), ) .map_err(|_| ServerFnError::new("Invalid token"))?; diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index ae41a508..a236b175 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -2,7 +2,10 @@ -- Password: ZO6gOCn0icxcvrke62F96A== INSERT INTO users (id, username, password_hash) VALUES ( - lower(hex(randomblob(16))), + lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || + substr(lower(hex(randomblob(2))),2) || '-' || + substr('89ab', abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || + lower(hex(randomblob(6))), 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI' ) ON CONFLICT (username) diff --git a/scripts/deploy.sh b/scripts/deploy.sh index 29f57af6..0f1c7ac7 100755 --- a/scripts/deploy.sh +++ b/scripts/deploy.sh @@ -15,7 +15,8 @@ echo "Deploying target: $TARGET" # rsync/scp will overwrite changed files. echo "Preparing remote directory..." gcloud compute ssh jake-user@$INSTANCE_NAME --project=$PROJECT_ID --zone=$ZONE --command=" - mkdir -p ~/app && \ + mkdir -p ~/app/data && \ + chmod 700 ~/app/data && \ sudo chown -R jake-user:jake-user ~/app " From 5bd6044a5dc9348da6c49a1b067b14c7480be24e Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 14:56:58 -0700 Subject: [PATCH 16/64] feat: Implement login rate limiting, remove default admin password, add database indexes, and enforce password length validation. 
--- Cargo.lock | 149 +++++++++++++++++- backend/Cargo.toml | 1 + backend/src/api/admin.rs | 27 +++- backend/src/main.rs | 8 +- .../20260210120000_seed_admin_password.sql | 15 +- ...0260319000000_add_published_at_indexes.sql | 3 + scripts/setup-dev.sh | 1 - 7 files changed, 182 insertions(+), 22 deletions(-) create mode 100644 migrations/20260319000000_add_published_at_indexes.sql diff --git a/Cargo.lock b/Cargo.lock index 013f586d..23773dd4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,6 +211,7 @@ dependencies = [ "tokio", "tower", "tower-http", + "tower_governor", "tracing", "tracing-subscriber", "uuid", @@ -463,6 +464,19 @@ dependencies = [ "typenum", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "dashmap" version = "6.1.0" @@ -657,6 +671,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror 1.0.69", +] + [[package]] name = "frontend" version = "0.1.0" @@ -764,6 +788,12 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.31" @@ -873,6 +903,26 @@ dependencies = [ "web-sys", ] +[[package]] +name = "governor" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap 5.5.3", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand", + "smallvec", + "spinning_top", +] + [[package]] name = "guardian" version = "1.3.0" @@ -1314,7 +1364,7 @@ checksum = "b5e4b8a1c190800d24f0c79e6c320186ad31ca8800e54c057ad65cdf452ff7d0" dependencies = [ "any_spawner", "axum", - "dashmap", + "dashmap 6.1.0", "futures", "hydration_context", "leptos", @@ -1651,6 +1701,24 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60993920e071b0c9b66f14e2b32740a4e27ffc82854dcd72035887f336a09a28" +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -1890,6 +1958,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + [[package]] name = "potential_utf" version = "0.1.4" @@ -1979,6 +2053,21 @@ dependencies = [ "yansi", ] +[[package]] +name = "quanta" +version = "0.12.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quote" version = "1.0.43" @@ -2046,6 +2135,15 @@ dependencies = [ "getrandom 0.2.16", ] +[[package]] +name = "raw-cpuid" +version = "11.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" +dependencies = [ + "bitflags", +] + [[package]] name = "reactive_graph" version = "0.1.8" @@ -2362,7 +2460,7 @@ dependencies = [ "axum", "bytes", "const_format", - "dashmap", + "dashmap 6.1.0", "futures", "gloo-net", "http", @@ -2534,6 +2632,15 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -3090,6 +3197,22 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tower_governor" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aea939ea6cfa7c4880f3e7422616624f97a567c16df67b53b11f0d03917a8e46" +dependencies = [ + "axum", + "forwarded-header-value", + "governor", + "http", + "pin-project", + "thiserror 1.0.69", + "tower", + "tracing", +] + [[package]] name = "tracing" version = "0.1.44" @@ -3423,6 +3546,22 @@ dependencies = [ "wasite", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + 
"winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + [[package]] name = "winapi-util" version = "0.1.11" @@ -3432,6 +3571,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.62.2" diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 83fb500d..bee963e2 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -37,6 +37,7 @@ leptos_axum = { workspace = true } frontend = { path = "../frontend", default-features = false, features = [ "ssr", ] } +tower_governor = "0.4.2" [features] default = [] diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 586983c3..600ec4ac 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -86,8 +86,22 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { + // Configure rate limit: 1 request per 3 seconds, up to 5 burst + let governor_conf = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .per_second(2) // Wait, let's use per_second(1) or something. Wait, user said "keyed on IP". + // 2 per second is okay, but 1 request per 2 seconds is better. 
+ .per_second(2) + .burst_size(5) + .finish() + .unwrap(), + ); + let governor_layer = tower_governor::GovernorLayer { + config: governor_conf, + }; + Router::new() - .route("/login", post(login)) + .route("/login", post(login).route_layer(governor_layer)) .route("/password", post(change_password)) .route("/me", get(me)) .with_state(state) @@ -142,7 +156,7 @@ async fn login( let is_invalid = match user { Some(ref u) => !verify_password(&req.password, &u.password_hash), None => { - static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI"; + static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$DummyDummyDummyDummy$DummyDummyDummyDummyDummyDummyDummyDummyDummy"; let _ = verify_password(&req.password, DUMMY_HASH); true } @@ -193,7 +207,7 @@ async fn login( } } -async fn me(headers: HeaderMap) -> Result<&'static str, StatusCode> { +async fn me(headers: HeaderMap) -> Result { let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -201,14 +215,14 @@ async fn me(headers: HeaderMap) -> Result<&'static str, StatusCode> { .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::default(); - jsonwebtoken::decode::( + let token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), &validation, ) .map_err(|_| StatusCode::UNAUTHORIZED)?; - Ok("Authenticated") + Ok(token_data.claims.sub) } async fn change_password( @@ -216,6 +230,9 @@ async fn change_password( headers: HeaderMap, Json(req): Json, ) -> Result { + if req.new_password.len() < 12 { + return Err((StatusCode::BAD_REQUEST, "Password too short".to_string())); + } let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) diff --git a/backend/src/main.rs b/backend/src/main.rs index bc0071c3..c59cb08a 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { 
.busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(8) + .max_connections(2) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; @@ -115,7 +115,11 @@ async fn main() -> Result<(), Box> { tracing::info!("listening on http://{}", &addr); let listener = tokio::net::TcpListener::bind(&addr).await?; - axum::serve(listener, app.into_make_service()).await?; + axum::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await?; Ok(()) } diff --git a/migrations/20260210120000_seed_admin_password.sql b/migrations/20260210120000_seed_admin_password.sql index a236b175..899ea320 100644 --- a/migrations/20260210120000_seed_admin_password.sql +++ b/migrations/20260210120000_seed_admin_password.sql @@ -1,12 +1,3 @@ --- Update or insert admin user with secure default password --- Password: ZO6gOCn0icxcvrke62F96A== -INSERT INTO users (id, username, password_hash) -VALUES ( - lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || - substr(lower(hex(randomblob(2))),2) || '-' || - substr('89ab', abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || - lower(hex(randomblob(6))), - 'admin', '$argon2id$v=19$m=19456,t=2,p=1$Ewiz6jCZu9NGQaAJtWRLqg$Fn5yB19PZG+eTq/f1oKbw+tsqvhwuAnMI3TpQCIg9vI' -) -ON CONFLICT (username) -DO UPDATE SET password_hash = EXCLUDED.password_hash; +-- Local development environments inject the password via scripts/setup-dev.sh. +-- Production environments require the admin password to be set manually or via an env var initialization script on first-run. +-- The default static password hash has been removed from this file. 
diff --git a/migrations/20260319000000_add_published_at_indexes.sql b/migrations/20260319000000_add_published_at_indexes.sql new file mode 100644 index 00000000..83c6fc8b --- /dev/null +++ b/migrations/20260319000000_add_published_at_indexes.sql @@ -0,0 +1,3 @@ +-- Add indexes on published_at to optimize listing articles and blog posts +CREATE INDEX IF NOT EXISTS idx_articles_published_at ON articles(published_at DESC); +CREATE INDEX IF NOT EXISTS idx_blog_posts_published_at ON blog_posts(published_at DESC); diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 30dc54b2..c604ff8d 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -56,7 +56,6 @@ echo "⏳ Running database migrations..." touch sqlite.db export DATABASE_URL="sqlite://sqlite.db" -cargo sqlx database create || true cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" From b2c6c15e27ab6332e677b10ab51bcf92c69a6a15 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 15:07:52 -0700 Subject: [PATCH 17/64] feat: Implement Argon2 for password hashing, refine admin API rate limits, and add a startup warning for empty user tables. 
--- README.md | 2 +- backend/Cargo.toml | 2 +- backend/src/api/admin.rs | 17 +++++------------ backend/src/main.rs | 13 ++++++++++++- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 4364797f..388e827e 100644 --- a/README.md +++ b/README.md @@ -63,5 +63,5 @@ cargo leptos watch - [ ] **Admin features** - Post creation, sync manager - [ ] **Content sync** - Import from terracestandard.com - [ ] **Media library** - Photo/video management -- [ ] **Password hashing** - Bcrypt implementation +- [x] **Password hashing** - Argon2 implementation - [ ] **Password reset** - Email-based recovery diff --git a/backend/Cargo.toml b/backend/Cargo.toml index bee963e2..37032368 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -13,7 +13,7 @@ sqlx = { version = "0.8", features = [ "chrono", "macros", ] } -argon2 = "0.5" +argon2 = { version = "0.5", features = ["std", "password-hash"] } rand = "0.8" uuid = { version = "1.0", features = ["serde", "v4"] } serde = { version = "1.0", features = ["derive"] } diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 600ec4ac..e4437226 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -86,11 +86,9 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { - // Configure rate limit: 1 request per 3 seconds, up to 5 burst + // Configure rate limit: 2 requests per second, up to 5 burst let governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .per_second(2) // Wait, let's use per_second(1) or something. Wait, user said "keyed on IP". - // 2 per second is okay, but 1 request per 2 seconds is better. 
.per_second(2) .burst_size(5) .finish() @@ -156,7 +154,7 @@ async fn login( let is_invalid = match user { Some(ref u) => !verify_password(&req.password, &u.password_hash), None => { - static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$DummyDummyDummyDummy$DummyDummyDummyDummyDummyDummyDummyDummyDummy"; + static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; let _ = verify_password(&req.password, DUMMY_HASH); true } @@ -248,16 +246,11 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - let user_id = token_data.claims.sub.parse::().map_err(|_| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - "Invalid user ID in token".to_string(), - ) - })?; + let user_id = &token_data.claims.sub; // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(user_id.to_string()) + .bind(user_id) .fetch_optional(&pool) .await .map_err(|_| { @@ -286,7 +279,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) - .bind(user_id.to_string()) + .bind(user_id) .execute(&pool) .await .map_err(|_| { diff --git a/backend/src/main.rs b/backend/src/main.rs index c59cb08a..776a8eb7 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(2) + .max_connections(1) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; @@ -64,6 +64,17 @@ async fn main() -> Result<(), Box> { e })?; + let user_count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users") + .fetch_one(&pool) + .await + .unwrap_or((0,)); + if user_count.0 == 0 { + tracing::warn!("====================================================================="); + tracing::warn!("WARNING: The 'users' table is empty. No admin user exists."); + tracing::warn!("Run './scripts/setup-dev.sh' or inject a seed migration to create one."); + tracing::warn!("====================================================================="); + } + // Build LeptosOptions from environment/config let site_addr: SocketAddr = std::env::var("LEPTOS_SITE_ADDR") .unwrap_or_else(|_| "0.0.0.0:3000".to_string()) From 83e75e72c4292d43b21bce834d8a1aad4b06327f Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 15:20:57 -0700 Subject: [PATCH 18/64] feat: Implement IP-based rate limiting for admin routes, add default UUID generation to schema primary keys, and increase the database connection pool size. 
--- backend/src/api/admin.rs | 14 +++++++++----- backend/src/main.rs | 2 +- migrations/20260110000000_initial_schema.sql | 14 +++++++------- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index e4437226..e494f004 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -89,6 +89,7 @@ pub fn router(state: crate::state::AppState) -> Router { // Configure rate limit: 2 requests per second, up to 5 burst let governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) .per_second(2) .burst_size(5) .finish() @@ -99,8 +100,11 @@ pub fn router(state: crate::state::AppState) -> Router { }; Router::new() - .route("/login", post(login).route_layer(governor_layer)) - .route("/password", post(change_password)) + .route("/login", post(login).route_layer(governor_layer.clone())) + .route( + "/password", + post(change_password).route_layer(governor_layer), + ) .route("/me", get(me)) .with_state(state) } @@ -205,7 +209,7 @@ async fn login( } } -async fn me(headers: HeaderMap) -> Result { +async fn me(headers: HeaderMap) -> Result, StatusCode> { let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -213,14 +217,14 @@ async fn me(headers: HeaderMap) -> Result { .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::default(); - let token_data = jsonwebtoken::decode::( + let _token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), &validation, ) .map_err(|_| StatusCode::UNAUTHORIZED)?; - Ok(token_data.claims.sub) + Ok(Json(serde_json::json!({"authenticated": true}))) } async fn change_password( diff --git a/backend/src/main.rs b/backend/src/main.rs index 776a8eb7..2a566a46 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { 
.busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(1) + .max_connections(5) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 34f6e401..43777b80 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,6 +1,6 @@ -- Users (Admin) CREATE TABLE users ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) @@ -8,7 +8,7 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -25,7 +25,7 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text @@ -37,7 +37,7 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, 
Novels, Poetry) CREATE TABLE creative_works ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, work_type TEXT NOT NULL, -- 'story', 'novel', 'poetry' @@ -52,7 +52,7 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, url TEXT NOT NULL, -- S3 URL or local path @@ -65,7 +65,7 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT NOT NULL, description TEXT, audio_url TEXT, @@ -76,7 +76,7 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( - id UUID PRIMARY KEY, + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, github_url TEXT, From 769ffec2bcdbb5805147ba16b23f24468aa700c9 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 15:37:47 -0700 Subject: [PATCH 19/64] refactor: Explicitly set JWT HS256 algorithm, optimize JWT secret loading, and refine UUID generation in 
schema. --- backend/src/api/admin.rs | 11 ++++++----- frontend/src/api/articles.rs | 13 +++++++++---- frontend/src/pages/admin/login.rs | 1 + migrations/20260110000000_initial_schema.sql | 14 +++++++------- 4 files changed, 23 insertions(+), 16 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index e494f004..993463f0 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -216,7 +216,7 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { .and_then(|s| s.strip_prefix("Bearer ")) .ok_or(StatusCode::UNAUTHORIZED)?; - let validation = jsonwebtoken::Validation::default(); + let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); let _token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), @@ -232,9 +232,6 @@ async fn change_password( headers: HeaderMap, Json(req): Json, ) -> Result { - if req.new_password.len() < 12 { - return Err((StatusCode::BAD_REQUEST, "Password too short".to_string())); - } let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -242,7 +239,7 @@ async fn change_password( .ok_or((StatusCode::UNAUTHORIZED, "Missing token".to_string()))?; // Verify token (simple check, ideally decode claims) - let validation = jsonwebtoken::Validation::default(); + let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); let token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), @@ -250,6 +247,10 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; + if req.new_password.len() < 12 { + return Err((StatusCode::BAD_REQUEST, "Password too short".to_string())); + } + let user_id = &token_data.claims.sub; // Verify current password diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index 9b146a68..525bc9bf 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -45,14 
+45,19 @@ pub mod ssr_utils { _exp: usize, } + use std::sync::OnceLock; + static JWT_SECRET: OnceLock> = OnceLock::new(); + // WARN: Synchronize this secret with backend/src/api/admin.rs - let secret = std::env::var("JWT_SECRET") - .expect("JWT_SECRET environment variable must be set") - .into_bytes(); + let secret = JWT_SECRET.get_or_init(|| { + std::env::var("JWT_SECRET") + .expect("JWT_SECRET environment variable must be set") + .into_bytes() + }); let token_data = decode::( token, - &DecodingKey::from_secret(&secret), + &DecodingKey::from_secret(secret), &Validation::new(Algorithm::HS256), ) .map_err(|_| ServerFnError::new("Invalid token"))?; diff --git a/frontend/src/pages/admin/login.rs b/frontend/src/pages/admin/login.rs index 8d60ff0c..715ff511 100644 --- a/frontend/src/pages/admin/login.rs +++ b/frontend/src/pages/admin/login.rs @@ -49,6 +49,7 @@ pub fn AdminLoginPage() -> impl IntoView { let password_val = _password.get(); let navigate = navigate.clone(); + #[cfg(debug_assertions)] web_sys::console::log_1( &format!("[Login] Attempting login for user: {}", username_val).into(), ); diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 43777b80..a9372224 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,6 +1,6 @@ -- Users (Admin) CREATE TABLE users ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT 
NULL, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) @@ -8,7 +8,7 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -25,7 +25,7 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text @@ -37,7 +37,7 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, Novels, Poetry) CREATE TABLE creative_works ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) 
|| '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, work_type TEXT NOT NULL, -- 'story', 'novel', 'poetry' @@ -52,7 +52,7 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, url TEXT NOT NULL, -- S3 URL or local path @@ -65,7 +65,7 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT NOT NULL, description TEXT, audio_url TEXT, @@ -76,7 +76,7 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || 
substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, github_url TEXT, From 465c66371459783649f129347388b7e8b862c7b8 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 17:13:06 -0700 Subject: [PATCH 20/64] refactor: Centralize JWT secret management in a new shared module, refine admin API rate limiting, and enhance development environment safety. --- backend/src/api/admin.rs | 60 ++++++++++---------- backend/src/main.rs | 2 +- frontend/src/api/articles.rs | 12 +--- migrations/20260110000000_initial_schema.sql | 7 +++ nginx/nginx.conf | 2 +- scripts/setup-dev.sh | 9 ++- shared/Cargo.toml | 4 ++ shared/src/auth.rs | 16 ++++++ shared/src/lib.rs | 3 + 9 files changed, 71 insertions(+), 44 deletions(-) create mode 100644 shared/src/auth.rs diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 993463f0..45fe99e4 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -20,23 +20,6 @@ use jsonwebtoken::{encode, EncodingKey, Header}; use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; -use std::sync::OnceLock; - -static JWT_SECRET: OnceLock> = OnceLock::new(); - -pub fn init_jwt_secret() { - let secret = std::env::var("JWT_SECRET") - .expect("JWT_SECRET environment variable must be set") - .into_bytes(); - JWT_SECRET - .set(secret) - .expect("JWT_SECRET initialized twice"); -} - -fn get_jwt_secret() -> &'static [u8] { - JWT_SECRET.get().expect("JWT_SECRET not initialized") -} - #[derive(Serialize, Deserialize)] pub struct Claims { sub: String, @@ -87,7 +70,19 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // Configure 
rate limit: 2 requests per second, up to 5 burst - let governor_conf = std::sync::Arc::new( + let login_governor_conf = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) + .per_second(2) + .burst_size(5) + .finish() + .unwrap(), + ); + let login_governor_layer = tower_governor::GovernorLayer { + config: login_governor_conf, + }; + + let password_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) .per_second(2) @@ -95,15 +90,15 @@ pub fn router(state: crate::state::AppState) -> Router { .finish() .unwrap(), ); - let governor_layer = tower_governor::GovernorLayer { - config: governor_conf, + let password_governor_layer = tower_governor::GovernorLayer { + config: password_governor_conf, }; Router::new() - .route("/login", post(login).route_layer(governor_layer.clone())) + .route("/login", post(login).route_layer(login_governor_layer)) .route( "/password", - post(change_password).route_layer(governor_layer), + post(change_password).route_layer(password_governor_layer), ) .route("/me", get(me)) .with_state(state) @@ -148,7 +143,8 @@ async fn login( .bind(&req.username) .fetch_optional(&pool) .await - .map_err(|_| { + .map_err(|e| { + tracing::error!("Database error during login fetch: {}", e); ( StatusCode::INTERNAL_SERVER_ERROR, "Database error".to_string(), @@ -186,9 +182,10 @@ async fn login( let token = encode( &Header::default(), &claims, - &EncodingKey::from_secret(get_jwt_secret()), + &EncodingKey::from_secret(shared::auth::get_jwt_secret()), ) - .map_err(|_| { + .map_err(|e| { + tracing::error!("Token generation failed: {}", e); ( StatusCode::INTERNAL_SERVER_ERROR, "Token generation failed".to_string(), @@ -219,7 +216,7 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { let validation = 
jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); let _token_data = jsonwebtoken::decode::( token, - &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), + &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, ) .map_err(|_| StatusCode::UNAUTHORIZED)?; @@ -242,7 +239,7 @@ async fn change_password( let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); let token_data = jsonwebtoken::decode::( token, - &jsonwebtoken::DecodingKey::from_secret(get_jwt_secret()), + &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; @@ -258,7 +255,8 @@ async fn change_password( .bind(user_id) .fetch_optional(&pool) .await - .map_err(|_| { + .map_err(|e| { + tracing::error!("Database error fetching user for password change: {}", e); ( StatusCode::INTERNAL_SERVER_ERROR, "Database error".to_string(), @@ -275,7 +273,8 @@ async fn change_password( } // Hash new password and update - let new_hash = hash_password(&req.new_password).map_err(|_| { + let new_hash = hash_password(&req.new_password).map_err(|e| { + tracing::error!("Failed to hash new password: {}", e); ( StatusCode::INTERNAL_SERVER_ERROR, "Failed to hash password".to_string(), @@ -287,7 +286,8 @@ async fn change_password( .bind(user_id) .execute(&pool) .await - .map_err(|_| { + .map_err(|e| { + tracing::error!("Database update failed for password change: {}", e); ( StatusCode::INTERNAL_SERVER_ERROR, "Database update failed".to_string(), diff --git a/backend/src/main.rs b/backend/src/main.rs index 2a566a46..ebe34841 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -36,7 +36,7 @@ async fn main() -> Result<(), Box> { .init(); // Initialize JWT Secret early so it panics at startup if missing - api::admin::init_jwt_secret(); + shared::auth::init_jwt_secret(); // Improved error handling for DATABASE_URL let database_url = 
std::env::var("DATABASE_URL") diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index 525bc9bf..7ab8b92f 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -45,19 +45,9 @@ pub mod ssr_utils { _exp: usize, } - use std::sync::OnceLock; - static JWT_SECRET: OnceLock> = OnceLock::new(); - - // WARN: Synchronize this secret with backend/src/api/admin.rs - let secret = JWT_SECRET.get_or_init(|| { - std::env::var("JWT_SECRET") - .expect("JWT_SECRET environment variable must be set") - .into_bytes() - }); - let token_data = decode::( token, - &DecodingKey::from_secret(secret), + &DecodingKey::from_secret(shared::auth::get_jwt_secret()), &Validation::new(Algorithm::HS256), ) .map_err(|_| ServerFnError::new("Invalid token"))?; diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index a9372224..46113c9a 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,5 +1,6 @@ -- Users (Admin) CREATE TABLE users ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, @@ -8,6 +9,7 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID 
from WordPress slug TEXT NOT NULL UNIQUE, @@ -25,6 +27,7 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -37,6 +40,7 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, Novels, Poetry) CREATE TABLE creative_works ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -52,6 +56,7 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, @@ -65,6 +70,7 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title 
TEXT NOT NULL, description TEXT, @@ -76,6 +82,7 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( + -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, diff --git a/nginx/nginx.conf b/nginx/nginx.conf index d3c57516..6f4b654f 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -97,7 +97,7 @@ http { # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $server_port; diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index c604ff8d..b7db9363 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -5,6 +5,11 @@ set -e echo "🚀 Setting up local development environment..." +if [ "$NODE_ENV" = "production" ] || [[ "$DATABASE_URL" == *"production"* ]]; then + echo "❌ Error: Production environment detected. Setup script aborted." + exit 1 +fi + # Check dependencies command -v cargo &> /dev/null || { echo "❌ cargo not found. Install Rust from https://rustup.rs/"; exit 1; } @@ -54,7 +59,9 @@ echo "" echo "⏳ Running database migrations..." 
# create an empty sqlite database file if it doesn't exist touch sqlite.db -export DATABASE_URL="sqlite://sqlite.db" +if [ -z "$DATABASE_URL" ]; then + export DATABASE_URL="sqlite://sqlite.db" +fi cargo sqlx migrate run -D "$DATABASE_URL" || true diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 5805e49f..6fa0dbda 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -7,3 +7,7 @@ edition = "2021" serde = { version = "1.0", features = ["derive"] } uuid = { version = "1.0", features = ["v4", "serde", "js"] } chrono = { version = "0.4", features = ["serde"] } + +[features] +default = [] +ssr = [] diff --git a/shared/src/auth.rs b/shared/src/auth.rs new file mode 100644 index 00000000..6a92d4af --- /dev/null +++ b/shared/src/auth.rs @@ -0,0 +1,16 @@ +use std::sync::OnceLock; + +static JWT_SECRET: OnceLock> = OnceLock::new(); + +pub fn init_jwt_secret() { + let secret = std::env::var("JWT_SECRET") + .expect("JWT_SECRET environment variable must be set") + .into_bytes(); + JWT_SECRET + .set(secret) + .expect("JWT_SECRET initialized twice"); +} + +pub fn get_jwt_secret() -> &'static [u8] { + JWT_SECRET.get().expect("JWT_SECRET not initialized") +} diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 71912e91..3c9f521c 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -1,3 +1,6 @@ +#[cfg(feature = "ssr")] +pub mod auth; + use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use uuid::Uuid; From 436d2711dcc8f23c512413d4e8d12284ccc0c197 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 17:25:22 -0700 Subject: [PATCH 21/64] fix: correct password length validation to count characters, enhance filename sanitization for article uploads, and enable the shared SSR feature for both backend and frontend. 
--- backend/Cargo.toml | 2 +- backend/src/api/admin.rs | 2 +- frontend/Cargo.toml | 3 ++- frontend/src/api/articles.rs | 18 ++++++++++-------- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 37032368..87fd7f8c 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -41,4 +41,4 @@ tower_governor = "0.4.2" [features] default = [] -ssr = ["leptos/ssr", "leptos_meta/ssr", "leptos_router/ssr", "frontend/ssr"] +ssr = ["leptos/ssr", "leptos_meta/ssr", "leptos_router/ssr", "frontend/ssr", "shared/ssr"] diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 45fe99e4..24747f5d 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -244,7 +244,7 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - if req.new_password.len() < 12 { + if req.new_password.chars().count() < 12 { return Err((StatusCode::BAD_REQUEST, "Password too short".to_string())); } diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index 61e6bfc1..56bedd14 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -33,7 +33,8 @@ ssr = [ "leptos_router/ssr", "dep:chrono", "dep:jsonwebtoken", - "dep:walkdir" + "dep:walkdir", + "shared/ssr" ] diff --git a/frontend/src/api/articles.rs b/frontend/src/api/articles.rs index 7ab8b92f..69cd9779 100644 --- a/frontend/src/api/articles.rs +++ b/frontend/src/api/articles.rs @@ -201,15 +201,17 @@ pub async fn upload_media( verify_token(&token)?; // We'll upload to a 'uploads' folder for manual picking or sorting later + let filtered_name: String = filename + .chars() + .filter(|c| c.is_alphanumeric() || *c == '.' || *c == '-' || *c == '_') + .collect(); + + if filtered_name.is_empty() { + return Err(ServerFnError::new("Invalid filename")); + } + let timestamp = chrono::Utc::now().timestamp(); - let safe_name = format!( - "{}_{}", - timestamp, - filename - .chars() - .filter(|c| c.is_alphanumeric() || *c == '.' 
|| *c == '-' || *c == '_') - .collect::() - ); + let safe_name = format!("{}_{}", timestamp, filtered_name); let destination = format!( "gs://jakewray-portfolio/media/journalism/uploads/{}", safe_name From 92cde79f74637901d844d481624a984002d4a8f1 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 17:54:19 -0700 Subject: [PATCH 22/64] feat: Introduce password hashing utility, harden admin API login rate limits, and update database schema to store UUIDs as TEXT. --- backend/src/api/admin.rs | 5 +++-- docker-compose.prod.yaml | 1 + hgen/Cargo.toml | 7 +++++++ hgen/src/main.rs | 8 ++++++++ migrations/20260110000000_initial_schema.sql | 14 +++++++------- scripts/setup-dev.sh | 4 ++-- shared/src/auth.rs | 2 ++ 7 files changed, 30 insertions(+), 11 deletions(-) create mode 100644 hgen/Cargo.toml create mode 100644 hgen/src/main.rs diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 24747f5d..ecd9d2aa 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -58,6 +58,7 @@ fn hash_password(password: &str) -> Result { .map(|hash| hash.to_string()) } +#[inline(never)] fn verify_password(password: &str, password_hash: &str) -> bool { let parsed_hash = match PasswordHash::new(password_hash) { Ok(h) => h, @@ -73,8 +74,8 @@ pub fn router(state: crate::state::AppState) -> Router { let login_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) - .per_second(2) - .burst_size(5) + .per_second(1) + .burst_size(3) .finish() .unwrap(), ); diff --git a/docker-compose.prod.yaml b/docker-compose.prod.yaml index 5ba32dce..7cdde0d5 100644 --- a/docker-compose.prod.yaml +++ b/docker-compose.prod.yaml @@ -3,6 +3,7 @@ services: build: . 
restart: always environment: + # 4 slashes are intentional for an absolute path: sqlite:////app/data/sqlite.db - DATABASE_URL=sqlite:////app/data/sqlite.db - LEPTOS_SITE_ADDR=0.0.0.0:3000 - RUST_LOG=info diff --git a/hgen/Cargo.toml b/hgen/Cargo.toml new file mode 100644 index 00000000..e64ce7f1 --- /dev/null +++ b/hgen/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "hgen" +version = "0.1.0" +edition = "2021" + +[dependencies] +argon2 = { version = "0.5.0", features = ["std", "password-hash"] } diff --git a/hgen/src/main.rs b/hgen/src/main.rs new file mode 100644 index 00000000..2ef18977 --- /dev/null +++ b/hgen/src/main.rs @@ -0,0 +1,8 @@ +use argon2::{password_hash::{rand_core::OsRng, PasswordHasher, SaltString}, Argon2}; +fn main() { + let password = "demo-admin-2026!"; + let salt = SaltString::generate(&mut OsRng); + let argon2 = Argon2::default(); + let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); + println!("{}", hash); +} diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 46113c9a..717ee8bf 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,7 +1,7 @@ -- Users (Admin) CREATE TABLE users ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, created_at DATETIME NOT NULL DEFAULT 
(strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) @@ -10,7 +10,7 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -28,7 +28,7 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text @@ -41,7 +41,7 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, Novels, Poetry) CREATE TABLE creative_works ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || 
lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, work_type TEXT NOT NULL, -- 'story', 'novel', 'poetry' @@ -57,7 +57,7 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, url TEXT NOT NULL, -- S3 URL or local path @@ -71,7 +71,7 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() 
>> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT NOT NULL, description TEXT, audio_url TEXT, @@ -83,7 +83,7 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id UUID PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, github_url TEXT, diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index b7db9363..034535cc 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -5,7 +5,7 @@ set -e echo "🚀 Setting up local development environment..." -if [ "$NODE_ENV" = "production" ] || [[ "$DATABASE_URL" == *"production"* ]]; then +if [ "$APP_ENV" = "production" ] || [[ "$DATABASE_URL" == *"/app/data"* ]]; then echo "❌ Error: Production environment detected. Setup script aborted." exit 1 fi @@ -77,7 +77,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F fi sqlite3 sqlite.db <> = OnceLock::new(); +/// Required initialization: Must be called at application startup before `get_jwt_secret()` is used, +/// otherwise `get_jwt_secret()` will panic. 
pub fn init_jwt_secret() { let secret = std::env::var("JWT_SECRET") .expect("JWT_SECRET environment variable must be set") From f9f583bb0678125f4baaa33d4d94a135ac375372 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 18:15:54 -0700 Subject: [PATCH 23/64] refactor: Use `PeerIpKeyExtractor` for admin rate limiting, make frontend console logs conditional on debug builds, exclude `hgen` from the workspace, and add dev setup security warnings. --- .env.example | 2 +- Cargo.toml | 1 + backend/src/api/admin.rs | 4 ++-- frontend/src/pages/admin/login.rs | 9 +++++++++ scripts/setup-dev.sh | 3 +++ 5 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.env.example b/.env.example index 30192ad6..323b38af 100644 --- a/.env.example +++ b/.env.example @@ -7,7 +7,7 @@ RUST_LOG=info RUST_BACKTRACE=1 # JWT Secret (CHANGE THIS IN PRODUCTION!) -JWT_SECRET=your-super-secret-key-change-in-production +JWT_SECRET=change-this-to-a-long-random-secret-in-production # Application environment ENVIRONMENT=development diff --git a/Cargo.toml b/Cargo.toml index 152324d0..d08ad214 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = ["frontend", "backend", "shared"] +exclude = ["hgen"] [workspace.dependencies] leptos = { version = "0.7" } diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index ecd9d2aa..701664a1 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -73,7 +73,7 @@ pub fn router(state: crate::state::AppState) -> Router { // Configure rate limit: 2 requests per second, up to 5 burst let login_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) + .key_extractor(tower_governor::key_extractor::PeerIpKeyExtractor) .per_second(1) .burst_size(3) .finish() @@ -85,7 +85,7 @@ pub fn router(state: crate::state::AppState) -> Router { let password_governor_conf = std::sync::Arc::new( 
tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(tower_governor::key_extractor::SmartIpKeyExtractor) + .key_extractor(tower_governor::key_extractor::PeerIpKeyExtractor) .per_second(2) .burst_size(5) .finish() diff --git a/frontend/src/pages/admin/login.rs b/frontend/src/pages/admin/login.rs index 715ff511..b13de3fd 100644 --- a/frontend/src/pages/admin/login.rs +++ b/frontend/src/pages/admin/login.rs @@ -41,6 +41,7 @@ pub fn AdminLoginPage() -> impl IntoView { let navigate = use_navigate(); move |ev: leptos::ev::SubmitEvent| { ev.prevent_default(); + #[cfg(debug_assertions)] web_sys::console::log_1(&"[Login] Form submitted".into()); _set_loading.set(true); set_error.set("".to_string()); @@ -60,6 +61,7 @@ pub fn AdminLoginPage() -> impl IntoView { password: password_val.clone(), }; + #[cfg(debug_assertions)] web_sys::console::log_1(&"[Login] Sending POST /admin/login".into()); let result = async { @@ -67,6 +69,7 @@ pub fn AdminLoginPage() -> impl IntoView { .header("Content-Type", "application/json") .json(&req) .map_err(|e| { + #[cfg(debug_assertions)] web_sys::console::log_1( &format!("[Login] Serialize error: {:?}", e).into(), ); @@ -75,12 +78,14 @@ pub fn AdminLoginPage() -> impl IntoView { .send() .await .map_err(|e| { + #[cfg(debug_assertions)] web_sys::console::log_1( &format!("[Login] Network error: {:?}", e).into(), ); "Failed to connect to server".to_string() })?; + #[cfg(debug_assertions)] web_sys::console::log_1( &format!("[Login] Response status: {}", resp.status()).into(), ); @@ -90,10 +95,12 @@ pub fn AdminLoginPage() -> impl IntoView { } let data: LoginResponse = resp.json().await.map_err(|e| { + #[cfg(debug_assertions)] web_sys::console::log_1(&format!("[Login] Parse error: {:?}", e).into()); "Failed to parse response".to_string() })?; + #[cfg(debug_assertions)] web_sys::console::log_1( &"[Login] Token received, storing in localStorage".into(), ); @@ -109,10 +116,12 @@ pub fn AdminLoginPage() -> impl IntoView { match 
result { Ok(()) => { + #[cfg(debug_assertions)] web_sys::console::log_1(&"[Login] Success, navigating to dashboard".into()); navigate("/admin/dashboard", Default::default()) } Err(msg) => { + #[cfg(debug_assertions)] web_sys::console::log_1(&format!("[Login] Error: {}", msg).into()); set_error.set(msg); } diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 034535cc..0dc7c22f 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -67,6 +67,9 @@ cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" echo "👤 Creating default admin user..." +# WARN: The seeded Argon2 hash below corresponds to 'demo-admin-2026!'. +# Anyone reading the repository knows these default credentials. Check that this +# dev instance isn't exposed to untrusted networks. # Fallback to python UUID or kernel uuid if uuidgen missing ADMIN_UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c 'import uuid; print(uuid.uuid4())' 2>/dev/null || { echo "❌ Could not generate a UUID. Please install uuidgen."; exit 1; }) SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') From 8b786278bc5d7756c212664f20094ba7d1878ccc Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 18:51:03 -0700 Subject: [PATCH 24/64] feat: Implement trusted proxy IP extraction for rate limiting, configure Nginx `X-Forwarded-For` handling, and update `hgen` to accept passwords as command-line arguments. 
--- .gitignore | 3 +++ backend/src/api/admin.rs | 48 +++++++++++++++++++++++++++++++++++++--- hgen/src/main.rs | 7 ++++-- nginx/nginx.conf | 2 +- shared/src/auth.rs | 11 +++++---- 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index f9aa68cc..3c7f9a3f 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,6 @@ imports/ .sass-cache/ .sass-cache +# Hgen +hgen/ +hgen/* diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 701664a1..a006e7f0 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -58,6 +58,48 @@ fn hash_password(password: &str) -> Result { .map(|hash| hash.to_string()) } +#[derive(Clone)] +pub struct TrustedProxyIpKeyExtractor; + +impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor { + type Key = String; + + fn extract(&self, req: &Request) -> Result { + let peer_ip = req + .extensions() + .get::>() + .map(|ci| ci.0.ip()); + + let is_trusted_proxy = peer_ip.is_some_and(|ip| { + ip.is_loopback() + || ip.is_unspecified() + || match ip { + std::net::IpAddr::V4(ipv4) => ipv4.is_private(), + _ => false, + } + }); + + if is_trusted_proxy { + if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { + return Ok(real_ip.to_string()); + } + if let Some(fwd) = req + .headers() + .get("X-Forwarded-For") + .and_then(|h| h.to_str().ok()) + { + if let Some(first_ip) = fwd.split(',').next() { + return Ok(first_ip.trim().to_string()); + } + } + } + + peer_ip + .map(|ip| ip.to_string()) + .ok_or(tower_governor::GovernorError::UnableToExtractKey) + } +} + #[inline(never)] fn verify_password(password: &str, password_hash: &str) -> bool { let parsed_hash = match PasswordHash::new(password_hash) { @@ -70,10 +112,10 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { - // Configure rate limit: 2 requests per second, up to 5 burst + // Configure rate limit: 1 request per 
second, up to 3 burst let login_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(tower_governor::key_extractor::PeerIpKeyExtractor) + .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) .burst_size(3) .finish() @@ -85,7 +127,7 @@ pub fn router(state: crate::state::AppState) -> Router { let password_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(tower_governor::key_extractor::PeerIpKeyExtractor) + .key_extractor(TrustedProxyIpKeyExtractor) .per_second(2) .burst_size(5) .finish() diff --git a/hgen/src/main.rs b/hgen/src/main.rs index 2ef18977..f2871eb3 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -1,6 +1,9 @@ -use argon2::{password_hash::{rand_core::OsRng, PasswordHasher, SaltString}, Argon2}; +use argon2::{ + password_hash::{rand_core::OsRng, PasswordHasher, SaltString}, + Argon2, +}; fn main() { - let password = "demo-admin-2026!"; + let password = std::env::args().nth(1).expect("Usage: hgen "); let salt = SaltString::generate(&mut OsRng); let argon2 = Argon2::default(); let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); diff --git a/nginx/nginx.conf b/nginx/nginx.conf index 6f4b654f..d3c57516 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -97,7 +97,7 @@ http { # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $server_port; diff --git a/shared/src/auth.rs b/shared/src/auth.rs index 79fe4ff2..d952e92d 100644 --- a/shared/src/auth.rs +++ b/shared/src/auth.rs @@ -5,12 +5,11 @@ static JWT_SECRET: OnceLock> = OnceLock::new(); /// Required initialization: Must be called at application startup before `get_jwt_secret()` is used, /// otherwise 
`get_jwt_secret()` will panic. pub fn init_jwt_secret() { - let secret = std::env::var("JWT_SECRET") - .expect("JWT_SECRET environment variable must be set") - .into_bytes(); - JWT_SECRET - .set(secret) - .expect("JWT_SECRET initialized twice"); + JWT_SECRET.get_or_init(|| { + std::env::var("JWT_SECRET") + .expect("JWT_SECRET environment variable must be set") + .into_bytes() + }); } pub fn get_jwt_secret() -> &'static [u8] { From 62907c232a54d95c868b7df0293241487e959445 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 19:12:47 -0700 Subject: [PATCH 25/64] feat: Improve UUID generation, enhance security by validating IPs and user IDs, secure password hashing, and optimize SQLite connection pooling. --- backend/src/api/admin.rs | 25 +++++++++++++---- backend/src/main.rs | 2 +- hgen/src/main.rs | 4 ++- migrations/20260110000000_initial_schema.sql | 28 ++++++++++---------- scripts/setup-dev.sh | 1 + 5 files changed, 39 insertions(+), 21 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index a006e7f0..f5484da0 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -81,7 +81,9 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor if is_trusted_proxy { if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { - return Ok(real_ip.to_string()); + if real_ip.parse::().is_ok() { + return Ok(real_ip.to_string()); + } } if let Some(fwd) = req .headers() @@ -89,7 +91,10 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .and_then(|h| h.to_str().ok()) { if let Some(first_ip) = fwd.split(',').next() { - return Ok(first_ip.trim().to_string()); + let ip_str = first_ip.trim(); + if ip_str.parse::().is_ok() { + return Ok(ip_str.to_string()); + } } } } @@ -198,7 +203,7 @@ async fn login( Some(ref u) => !verify_password(&req.password, &u.password_hash), None => { static DUMMY_HASH: &str = 
"$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; - let _ = verify_password(&req.password, DUMMY_HASH); + let _ = std::hint::black_box(verify_password(&req.password, DUMMY_HASH)); true } }; @@ -257,14 +262,17 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); - let _token_data = jsonwebtoken::decode::( + let token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, ) .map_err(|_| StatusCode::UNAUTHORIZED)?; - Ok(Json(serde_json::json!({"authenticated": true}))) + Ok(Json(serde_json::json!({ + "authenticated": true, + "sub": token_data.claims.sub + }))) } async fn change_password( @@ -293,6 +301,13 @@ async fn change_password( let user_id = &token_data.claims.sub; + if uuid::Uuid::parse_str(user_id).is_err() { + return Err(( + StatusCode::BAD_REQUEST, + "Invalid user ID format".to_string(), + )); + } + // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") .bind(user_id) diff --git a/backend/src/main.rs b/backend/src/main.rs index ebe34841..a147ef1e 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(5) + .max_connections(1) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/hgen/src/main.rs b/hgen/src/main.rs index f2871eb3..19f275d7 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -3,7 +3,9 @@ use argon2::{ Argon2, }; fn main() { - let password = std::env::args().nth(1).expect("Usage: hgen "); + let mut password = String::new(); + std::io::stdin().read_line(&mut password).expect("Failed to read password"); + let password = 
password.trim_end(); let salt = SaltString::generate(&mut OsRng); let argon2 = Argon2::default(); let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 717ee8bf..bd54c66b 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,7 +1,7 @@ -- Users (Admin) CREATE TABLE users ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) @@ -9,8 +9,8 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || 
substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -27,8 +27,8 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, content TEXT NOT NULL, -- Markdown/Rich Text @@ -40,8 +40,8 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, Novels, Poetry) CREATE TABLE creative_works ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, work_type TEXT NOT 
NULL, -- 'story', 'novel', 'poetry' @@ -56,8 +56,8 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, url TEXT NOT NULL, -- S3 URL or local path @@ -70,8 +70,8 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT NOT NULL, description TEXT, audio_url TEXT, @@ -82,8 +82,8 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( - -- Note: random() >> 1 is used in UUID generation to prevent INT64_MIN overflow in abs() - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || 
lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (abs(random() >> 1) % 4) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, github_url TEXT, diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 0dc7c22f..57ceaf02 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -59,6 +59,7 @@ echo "" echo "⏳ Running database migrations..." # create an empty sqlite database file if it doesn't exist touch sqlite.db +chmod 600 sqlite.db if [ -z "$DATABASE_URL" ]; then export DATABASE_URL="sqlite://sqlite.db" fi From 24c447198014675d6896801335d4f4769bd20747 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 19:35:46 -0700 Subject: [PATCH 26/64] feat: Implement IPv6 private IP detection, reduce password rate limits, increase database connection pool, refine user ID error handling, and add hgen usage comments. 
--- backend/src/api/admin.rs | 13 ++++++++----- backend/src/main.rs | 2 +- hgen/src/main.rs | 8 ++++++++ 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index f5484da0..ca4be3a9 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -75,7 +75,10 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor || ip.is_unspecified() || match ip { std::net::IpAddr::V4(ipv4) => ipv4.is_private(), - _ => false, + std::net::IpAddr::V6(ipv6) => { + (ipv6.segments()[0] & 0xfe00) == 0xfc00 + || (ipv6.segments()[0] & 0xffc0) == 0xfe80 + } } }); @@ -133,8 +136,8 @@ pub fn router(state: crate::state::AppState) -> Router { let password_governor_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(2) - .burst_size(5) + .per_second(1) + .burst_size(3) .finish() .unwrap(), ); @@ -303,8 +306,8 @@ async fn change_password( if uuid::Uuid::parse_str(user_id).is_err() { return Err(( - StatusCode::BAD_REQUEST, - "Invalid user ID format".to_string(), + StatusCode::INTERNAL_SERVER_ERROR, + "Invalid user ID format in token".to_string(), )); } diff --git a/backend/src/main.rs b/backend/src/main.rs index a147ef1e..ebe34841 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { .busy_timeout(std::time::Duration::from_secs(5)); let pool = SqlitePoolOptions::new() - .max_connections(1) + .max_connections(5) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; diff --git a/hgen/src/main.rs b/hgen/src/main.rs index 19f275d7..add25b2f 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -1,3 +1,11 @@ +// Usage: +// This utility reads the password from standard input (stdin), not from command-line arguments. 
+// This improves security by preventing the password from appearing in shell history or `ps` output. +// +// Example: +// echo -n "mypassword" | cargo run --bin hgen +// or run `cargo run --bin hgen` and type the password followed by Enter. + use argon2::{ password_hash::{rand_core::OsRng, PasswordHasher, SaltString}, Argon2, From 13b3a05a5160d3879148f8688eb4f7525763859b Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 20:17:40 -0700 Subject: [PATCH 27/64] feat: Implement pagination for article and blog post listings; refactor JWT secret initialization, unify admin API rate limiting, and refine password validation and trimming. --- backend/src/api/admin.rs | 33 ++++++++------------------------- backend/src/api/public.rs | 16 +++++++++++++--- hgen/src/main.rs | 2 +- shared/src/auth.rs | 14 +++++++------- 4 files changed, 29 insertions(+), 36 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index ca4be3a9..3484cbdd 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -88,18 +88,6 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor return Ok(real_ip.to_string()); } } - if let Some(fwd) = req - .headers() - .get("X-Forwarded-For") - .and_then(|h| h.to_str().ok()) - { - if let Some(first_ip) = fwd.split(',').next() { - let ip_str = first_ip.trim(); - if ip_str.parse::().is_ok() { - return Ok(ip_str.to_string()); - } - } - } } peer_ip @@ -121,7 +109,7 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // Configure rate limit: 1 request per second, up to 3 burst - let login_governor_conf = std::sync::Arc::new( + let rate_limit_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) @@ -130,19 +118,11 @@ pub fn router(state: crate::state::AppState) -> Router { .unwrap(), ); let login_governor_layer = 
tower_governor::GovernorLayer { - config: login_governor_conf, + config: rate_limit_conf.clone(), }; - let password_governor_conf = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(3) - .finish() - .unwrap(), - ); let password_governor_layer = tower_governor::GovernorLayer { - config: password_governor_conf, + config: rate_limit_conf, }; Router::new() @@ -298,8 +278,11 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - if req.new_password.chars().count() < 12 { - return Err((StatusCode::BAD_REQUEST, "Password too short".to_string())); + if req.new_password.len() < 12 || req.new_password.len() > 128 { + return Err(( + StatusCode::BAD_REQUEST, + "Password length must be between 12 and 128 bytes".to_string(), + )); } let user_id = &token_data.claims.sub; diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 4ebf5975..7d86ba62 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -1,7 +1,11 @@ -use axum::{extract::State, routing::get, Json, Router}; +use axum::{extract::Query, extract::State, routing::get, Json, Router}; use shared::{Article, BlogPost}; use sqlx::SqlitePool; +#[derive(serde::Deserialize)] +pub struct Pagination { + pub limit: Option, +} pub fn router(state: crate::state::AppState) -> Router { Router::new() .route("/health", get(health_check)) @@ -18,8 +22,11 @@ use sqlx::Row; async fn list_articles( State(pool): State, + Query(query): Query, ) -> Result>, axum::http::StatusCode> { - match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT 20") + let limit = query.limit.unwrap_or(20).min(50); + match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY 
published_at DESC LIMIT ?") + .bind(limit) .try_map(|row: sqlx::sqlite::SqliteRow| { let origin_str: String = row.get("origin"); let origin = match origin_str.as_str() { @@ -56,8 +63,11 @@ async fn list_articles( async fn list_blog_posts( State(pool): State, + Query(query): Query, ) -> Result>, axum::http::StatusCode> { - match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT 20") + let limit = query.limit.unwrap_or(20).min(50); + match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ?") + .bind(limit) .try_map(|row: sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); let tags = match tags_str { diff --git a/hgen/src/main.rs b/hgen/src/main.rs index add25b2f..db3bd689 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -13,7 +13,7 @@ use argon2::{ fn main() { let mut password = String::new(); std::io::stdin().read_line(&mut password).expect("Failed to read password"); - let password = password.trim_end(); + let password = password.trim(); let salt = SaltString::generate(&mut OsRng); let argon2 = Argon2::default(); let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); diff --git a/shared/src/auth.rs b/shared/src/auth.rs index d952e92d..c8ffaf29 100644 --- a/shared/src/auth.rs +++ b/shared/src/auth.rs @@ -2,16 +2,16 @@ use std::sync::OnceLock; static JWT_SECRET: OnceLock> = OnceLock::new(); -/// Required initialization: Must be called at application startup before `get_jwt_secret()` is used, -/// otherwise `get_jwt_secret()` will panic. +/// Required initialization: Call early if you want to fail fast on startup, +/// but `get_jwt_secret` will also lazily initialize it. 
pub fn init_jwt_secret() { + let _ = get_jwt_secret(); +} + +pub fn get_jwt_secret() -> &'static [u8] { JWT_SECRET.get_or_init(|| { std::env::var("JWT_SECRET") .expect("JWT_SECRET environment variable must be set") .into_bytes() - }); -} - -pub fn get_jwt_secret() -> &'static [u8] { - JWT_SECRET.get().expect("JWT_SECRET not initialized") + }) } From 43a5434f2efc6f884f0c6192517f1976abecff9e Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 20:28:05 -0700 Subject: [PATCH 28/64] feat: Implement offset pagination for public content, enhance admin API security with dynamic password hashing and refined rate limits, and enable SSR by default. --- backend/Cargo.toml | 2 +- backend/src/api/admin.rs | 28 ++++++++++++++++++++++------ backend/src/api/public.rs | 9 +++++++-- scripts/setup-dev.sh | 7 +++++-- 4 files changed, 35 insertions(+), 11 deletions(-) diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 87fd7f8c..cfae5cd0 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -40,5 +40,5 @@ frontend = { path = "../frontend", default-features = false, features = [ tower_governor = "0.4.2" [features] -default = [] +default = ["ssr"] ssr = ["leptos/ssr", "leptos_meta/ssr", "leptos_router/ssr", "frontend/ssr", "shared/ssr"] diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 3484cbdd..3cf57750 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -109,7 +109,15 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // Configure rate limit: 1 request per second, up to 3 burst - let rate_limit_conf = std::sync::Arc::new( + let login_rate_limit_conf = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(3) + .finish() + .unwrap(), + ); + let password_rate_limit_conf = std::sync::Arc::new( 
tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) @@ -118,11 +126,11 @@ pub fn router(state: crate::state::AppState) -> Router { .unwrap(), ); let login_governor_layer = tower_governor::GovernorLayer { - config: rate_limit_conf.clone(), + config: login_rate_limit_conf, }; let password_governor_layer = tower_governor::GovernorLayer { - config: rate_limit_conf, + config: password_rate_limit_conf, }; Router::new() @@ -261,8 +269,14 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { async fn change_password( State(pool): State, headers: HeaderMap, - Json(req): Json, + req: Request, ) -> Result { + let bytes = to_bytes(req.into_body(), 16 * 1024) + .await + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; + + let req: ChangePasswordRequest = serde_json::from_slice(&bytes) + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -278,10 +292,12 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - if req.new_password.len() < 12 || req.new_password.len() > 128 { + let char_count = req.new_password.chars().count(); + let byte_count = req.new_password.len(); + if char_count < 12 || byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Password length must be between 12 and 128 bytes".to_string(), + "Password length must be at least 12 characters and max 128 bytes".to_string(), )); } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 7d86ba62..561aa737 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -5,6 +5,7 @@ use sqlx::SqlitePool; #[derive(serde::Deserialize)] pub struct Pagination { pub limit: Option, + pub offset: Option, } pub fn router(state: crate::state::AppState) -> Router { Router::new() @@ -25,8 +26,10 @@ async fn list_articles( Query(query): Query, ) -> Result>, 
axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ?") + let offset = query.offset.unwrap_or(0); + match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) + .bind(offset) .try_map(|row: sqlx::sqlite::SqliteRow| { let origin_str: String = row.get("origin"); let origin = match origin_str.as_str() { @@ -66,8 +69,10 @@ async fn list_blog_posts( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ?") + let offset = query.offset.unwrap_or(0); + match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) + .bind(offset) .try_map(|row: sqlx::sqlite::SqliteRow| { let tags_str: Option = row.get("tags"); let tags = match tags_str { diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 57ceaf02..d5607530 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -68,9 +68,12 @@ cargo sqlx migrate run -D "$DATABASE_URL" || true echo "" echo "👤 Creating default admin user..." -# WARN: The seeded Argon2 hash below corresponds to 'demo-admin-2026!'. +# WARN: The seeded password below is 'demo-admin-2026!'. # Anyone reading the repository knows these default credentials. Check that this # dev instance isn't exposed to untrusted networks. +# Generate hash dynamically +ADMIN_HASH=$(echo -n "demo-admin-2026!" 
| cargo run --bin hgen -q | tail -n 1) + # Fallback to python UUID or kernel uuid if uuidgen missing ADMIN_UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c 'import uuid; print(uuid.uuid4())' 2>/dev/null || { echo "❌ Could not generate a UUID. Please install uuidgen."; exit 1; }) SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') @@ -81,7 +84,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F fi sqlite3 sqlite.db < Date: Thu, 19 Mar 2026 20:34:51 -0700 Subject: [PATCH 29/64] feat: Configure trusted proxies via environment variable, reduce API rate limit burst size, add API input validation, enhance dev setup script robustness, and improve JWT secret error message. --- backend/src/api/admin.rs | 41 +++++++++++++++++++++++++++------------- scripts/setup-dev.sh | 11 +++++++---- shared/src/auth.rs | 4 +++- 3 files changed, 38 insertions(+), 18 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 3cf57750..3e2e028f 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -70,17 +70,13 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .get::>() .map(|ci| ci.0.ip()); - let is_trusted_proxy = peer_ip.is_some_and(|ip| { - ip.is_loopback() - || ip.is_unspecified() - || match ip { - std::net::IpAddr::V4(ipv4) => ipv4.is_private(), - std::net::IpAddr::V6(ipv6) => { - (ipv6.segments()[0] & 0xfe00) == 0xfc00 - || (ipv6.segments()[0] & 0xffc0) == 0xfe80 - } - } - }); + let trusted_ips: Vec = std::env::var("TRUSTED_PROXY_IPS") + .unwrap_or_default() + .split(',') + .filter_map(|s| s.trim().parse().ok()) + .collect(); + + let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); if is_trusted_proxy { if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { @@ -113,7 +109,7 @@ pub fn router(state: crate::state::AppState) -> Router { 
tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) - .burst_size(3) + .burst_size(1) .finish() .unwrap(), ); @@ -121,7 +117,7 @@ pub fn router(state: crate::state::AppState) -> Router { tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) - .burst_size(3) + .burst_size(1) .finish() .unwrap(), ); @@ -271,6 +267,18 @@ async fn change_password( headers: HeaderMap, req: Request, ) -> Result { + let content_type = headers + .get(header::CONTENT_TYPE) + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + + if !content_type.contains("application/json") { + return Err(( + StatusCode::UNSUPPORTED_MEDIA_TYPE, + "Unsupported content type".to_string(), + )); + } + let bytes = to_bytes(req.into_body(), 16 * 1024) .await .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; @@ -292,6 +300,13 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; + if req.current_password.is_empty() || req.current_password.len() > 128 { + return Err(( + StatusCode::BAD_REQUEST, + "Invalid current password length".to_string(), + )); + } + let char_count = req.new_password.chars().count(); let byte_count = req.new_password.len(); if char_count < 12 || byte_count > 128 { diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index d5607530..6877eadd 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -72,7 +72,12 @@ echo "👤 Creating default admin user..." # Anyone reading the repository knows these default credentials. Check that this # dev instance isn't exposed to untrusted networks. # Generate hash dynamically -ADMIN_HASH=$(echo -n "demo-admin-2026!" | cargo run --bin hgen -q | tail -n 1) +ADMIN_HASH=$(echo -n "demo-admin-2026!" | cargo run --quiet --bin hgen 2>/dev/null | tail -n 1) + +if ! 
[[ "$ADMIN_HASH" =~ ^\$argon2 ]]; then + echo "❌ hgen failed or produced unexpected output" + exit 1 +fi # Fallback to python UUID or kernel uuid if uuidgen missing ADMIN_UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c 'import uuid; print(uuid.uuid4())' 2>/dev/null || { echo "❌ Could not generate a UUID. Please install uuidgen."; exit 1; }) @@ -83,9 +88,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db < &'static [u8] { JWT_SECRET.get_or_init(|| { std::env::var("JWT_SECRET") - .expect("JWT_SECRET environment variable must be set") + .unwrap_or_else(|_| { + panic!("JWT_SECRET environment variable must be set. If this is a frontend/WASM build, the 'ssr' feature may have been incorrectly enabled."); + }) .into_bytes() }) } From 0a402a5ee92d44f6516256a6b8c87dd672f90037 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 21:04:35 -0700 Subject: [PATCH 30/64] refactor: Cache trusted proxy IPs with OnceLock, consolidate admin rate limit configurations, and fix hgen script execution. 
--- backend/src/api/admin.rs | 48 +++++++++++++++++++++------------------- scripts/setup-dev.sh | 2 +- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 3e2e028f..65f3e2f0 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -19,6 +19,7 @@ use chrono::{Duration, Utc}; use jsonwebtoken::{encode, EncodingKey, Header}; use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; +use std::sync::OnceLock; #[derive(Serialize, Deserialize)] pub struct Claims { @@ -70,11 +71,14 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .get::>() .map(|ci| ci.0.ip()); - let trusted_ips: Vec = std::env::var("TRUSTED_PROXY_IPS") - .unwrap_or_default() - .split(',') - .filter_map(|s| s.trim().parse().ok()) - .collect(); + static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); + let trusted_ips = TRUSTED_PROXY_IPS.get_or_init(|| { + std::env::var("TRUSTED_PROXY_IPS") + .unwrap_or_default() + .split(',') + .filter_map(|s| s.trim().parse().ok()) + .collect() + }); let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); @@ -105,15 +109,7 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // Configure rate limit: 1 request per second, up to 3 burst - let login_rate_limit_conf = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - let password_rate_limit_conf = std::sync::Arc::new( + let rate_limit_conf = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) @@ -121,12 +117,17 @@ pub fn router(state: crate::state::AppState) -> Router { .finish() .unwrap(), ); + let login_governor_layer = tower_governor::GovernorLayer { - config: login_rate_limit_conf, + 
config: rate_limit_conf.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: password_rate_limit_conf, + config: rate_limit_conf.clone(), + }; + + let me_governor_layer = tower_governor::GovernorLayer { + config: rate_limit_conf, }; Router::new() @@ -135,7 +136,7 @@ pub fn router(state: crate::state::AppState) -> Router { "/password", post(change_password).route_layer(password_governor_layer), ) - .route("/me", get(me)) + .route("/me", get(me).route_layer(me_governor_layer)) .with_state(state) } @@ -279,12 +280,6 @@ async fn change_password( )); } - let bytes = to_bytes(req.into_body(), 16 * 1024) - .await - .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; - - let req: ChangePasswordRequest = serde_json::from_slice(&bytes) - .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -300,6 +295,13 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; + let bytes = to_bytes(req.into_body(), 16 * 1024) + .await + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; + + let req: ChangePasswordRequest = serde_json::from_slice(&bytes) + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; + if req.current_password.is_empty() || req.current_password.len() > 128 { return Err(( StatusCode::BAD_REQUEST, diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 6877eadd..d6ea874b 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -72,7 +72,7 @@ echo "👤 Creating default admin user..." # Anyone reading the repository knows these default credentials. Check that this # dev instance isn't exposed to untrusted networks. # Generate hash dynamically -ADMIN_HASH=$(echo -n "demo-admin-2026!" | cargo run --quiet --bin hgen 2>/dev/null | tail -n 1) +ADMIN_HASH=$(echo -n "demo-admin-2026!" 
| (cd hgen && cargo run --quiet 2>/dev/null) | tail -n 1) if ! [[ "$ADMIN_HASH" =~ ^\$argon2 ]]; then echo "❌ hgen failed or produced unexpected output" From 8d882327449d3662f90f3b98d20519ea0eb99c28 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 21:54:01 -0700 Subject: [PATCH 31/64] feat: Implement trusted proxy IP configuration for accurate rate limiting, refine admin API rate limits, and improve user ID parsing in the admin API. --- .env.example | 3 ++ README.md | 1 + backend/src/api/admin.rs | 57 ++++++++++++-------- migrations/20260110000000_initial_schema.sql | 2 - 4 files changed, 38 insertions(+), 25 deletions(-) diff --git a/.env.example b/.env.example index 323b38af..981abab8 100644 --- a/.env.example +++ b/.env.example @@ -11,3 +11,6 @@ JWT_SECRET=change-this-to-a-long-random-secret-in-production # Application environment ENVIRONMENT=development + +# Proxy Configuration (REQUIRED in production if using Nginx/Reverse Proxy) +# TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 diff --git a/README.md b/README.md index 388e827e..5fd8c272 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ For first-time SSL setup on the server: ### Known Limitations - **Database Concurrency**: The application uses embedded SQLite in WAL mode with a small connection pool (`max_connections(5)`). SQLite only allows one concurrent writer. Concurrent write bursts will queue (up to a 5s busy timeout) and could fail under heavy write load. This is acceptable for a personal blog/portfolio, but must be accounted for if write traffic scales. +- **Reverse Proxy Setup**: When deploying behind a reverse proxy (such as Nginx), you **MUST** configure the `TRUSTED_PROXY_IPS` environment variable with the proxy's IP address. If left unset, all client requests will appear to come from the proxy's IP, effectively disabling per-client rate limiting and causing all users to share the same rate limit bucket. 
## Development diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 65f3e2f0..ddc39db4 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -84,8 +84,8 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor if is_trusted_proxy { if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { - if real_ip.parse::().is_ok() { - return Ok(real_ip.to_string()); + if let Ok(parsed_ip) = real_ip.parse::() { + return Ok(parsed_ip.to_string()); } } } @@ -108,26 +108,37 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { - // Configure rate limit: 1 request per second, up to 3 burst - let rate_limit_conf = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - let login_governor_layer = tower_governor::GovernorLayer { - config: rate_limit_conf.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let password_governor_layer = tower_governor::GovernorLayer { - config: rate_limit_conf.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let me_governor_layer = tower_governor::GovernorLayer { - config: rate_limit_conf, + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(10) + .finish() + .unwrap(), + ), }; Router::new() @@ -191,6 +202,8 @@ async fn login( Some(ref u) => !verify_password(&req.password, &u.password_hash), None => { static DUMMY_HASH: &str = 
"$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; + // Note: This does not provide a robust timing-safe guarantee against advanced analysis, + // but prevents trivial early-return optimization timing differences. let _ = std::hint::black_box(verify_password(&req.password, DUMMY_HASH)); true } @@ -318,18 +331,16 @@ async fn change_password( )); } - let user_id = &token_data.claims.sub; - - if uuid::Uuid::parse_str(user_id).is_err() { - return Err(( + let parsed_user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { + ( StatusCode::INTERNAL_SERVER_ERROR, "Invalid user ID format in token".to_string(), - )); - } + ) + })?; // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(user_id) + .bind(parsed_user_id) .fetch_optional(&pool) .await .map_err(|e| { @@ -360,7 +371,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? WHERE id = ?") .bind(new_hash) - .bind(user_id) + .bind(parsed_user_id) .execute(&pool) .await .map_err(|e| { diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index bd54c66b..74513d52 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -93,5 +93,3 @@ CREATE TABLE projects ( is_featured BOOLEAN DEFAULT FALSE, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); - - From f5f5db4b0fbd24c22a45ff3860a1a3887ab2001f Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 22:24:42 -0700 Subject: [PATCH 32/64] feat: Enhance API and frontend security by adding rate limiting, improving IP extraction, mitigating login timing attacks, validating image link schemes, and securing user creation. 
--- backend/src/api/admin.rs | 36 ++++++++++++++++++++++++---------- backend/src/api/public.rs | 19 ++++++++++++++---- frontend/src/pages/sections.rs | 31 +++++++++++++++++------------ scripts/setup-dev.sh | 3 ++- 4 files changed, 62 insertions(+), 27 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index ddc39db4..753eb742 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -88,6 +88,17 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor return Ok(parsed_ip.to_string()); } } + if let Some(forwarded_for) = req.headers().get("X-Forwarded-For").and_then(|h| h.to_str().ok()) { + if let Some(first_ip) = forwarded_for.split(',').next() { + if let Ok(parsed_ip) = first_ip.trim().parse::() { + return Ok(parsed_ip.to_string()); + } + } + } + tracing::warn!( + "TRUSTED_PROXY_IPS allowed proxy IP {}, but no valid X-Real-IP or X-Forwarded-For header was found. Rate limiting will apply to the proxy IP.", + peer_ip.unwrap() + ); } peer_ip @@ -198,17 +209,20 @@ async fn login( ) })?; - let is_invalid = match user { - Some(ref u) => !verify_password(&req.password, &u.password_hash), + let (hash_to_verify, is_valid_user) = match user { + Some(ref u) => (u.password_hash.as_str(), true), None => { + // To prevent early-return timing leaks, we always verify a password hash. + // If the user doesn't exist, we use a dummy hash. The dummy hash's source + // password is irrelevant as it's only used to consume time. static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; - // Note: This does not provide a robust timing-safe guarantee against advanced analysis, - // but prevents trivial early-return optimization timing differences. 
- let _ = std::hint::black_box(verify_password(&req.password, DUMMY_HASH)); - true + (DUMMY_HASH, false) } }; + let password_match = verify_password(&req.password, hash_to_verify); + let is_invalid = !is_valid_user || !password_match; + if is_invalid { if content_type.contains("application/x-www-form-urlencoded") || content_type.contains("multipart/form-data") @@ -278,10 +292,11 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { async fn change_password( State(pool): State, - headers: HeaderMap, req: Request, ) -> Result { - let content_type = headers + let (parts, body) = req.into_parts(); + let content_type = parts + .headers .get(header::CONTENT_TYPE) .and_then(|v| v.to_str().ok()) .unwrap_or(""); @@ -293,7 +308,8 @@ async fn change_password( )); } - let token = headers + let token = parts + .headers .get("Authorization") .and_then(|h| h.to_str().ok()) .and_then(|s| s.strip_prefix("Bearer ")) @@ -308,7 +324,7 @@ async fn change_password( ) .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token".to_string()))?; - let bytes = to_bytes(req.into_body(), 16 * 1024) + let bytes = to_bytes(body, 16 * 1024) .await .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 561aa737..0ad8557f 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -8,10 +8,21 @@ pub struct Pagination { pub offset: Option, } pub fn router(state: crate::state::AppState) -> Router { + let public_governor_layer = tower_governor::GovernorLayer { + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::admin::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ), + }; + Router::new() .route("/health", get(health_check)) - .route("/api/articles", get(list_articles)) - .route("/api/blog", get(list_blog_posts)) + .route("/api/articles", 
get(list_articles).route_layer(public_governor_layer.clone())) + .route("/api/blog", get(list_blog_posts).route_layer(public_governor_layer)) .with_state(state) } @@ -26,7 +37,7 @@ async fn list_articles( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0); + let offset = query.offset.unwrap_or(0).min(100_000); match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) .bind(offset) @@ -69,7 +80,7 @@ async fn list_blog_posts( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0); + let offset = query.offset.unwrap_or(0).min(100_000); match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) .bind(offset) diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 31c7e3c9..094a0b83 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -220,18 +220,25 @@ fn linkify_images(html: &str) -> String { }; if let Some(src_url) = src { - let wrapper_start = format!( - "", - src_url - ); - let wrapper_end = ""; - - // Replace strict range - let new_content = format!("{}{}{}", wrapper_start, img_tag, wrapper_end); - out.replace_range(abs_open..abs_close, &new_content); - - search_pos = abs_open + new_content.len(); - continue; + let is_safe_scheme = src_url.starts_with("http://") + || src_url.starts_with("https://") + || src_url.starts_with("data:image/") + || src_url.starts_with('/'); + + if is_safe_scheme { + let wrapper_start = format!( + "", + src_url + ); + let wrapper_end = ""; + + // Replace strict range + let new_content = format!("{}{}{}", wrapper_start, img_tag, wrapper_end); + 
out.replace_range(abs_open..abs_close, &new_content); + + search_pos = abs_open + new_content.len(); + continue; + } } search_pos = abs_close; } else { diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index d6ea874b..8d0fe311 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -88,7 +88,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ADMIN_HASH') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES (?1, ?2, ?3) ON CONFLICT (username) DO NOTHING;" \ + "$SAFE_UUID" "admin" "$ADMIN_HASH" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" From 142c44d885f1ea41b8b2f70029f0928cbcb7598a Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Thu, 19 Mar 2026 23:00:58 -0700 Subject: [PATCH 33/64] feat: Improve rate limiting proxy detection, simplify user ID generation, and refine admin password change error handling. 
--- backend/src/api/admin.rs | 19 +++++++++++++------ backend/src/api/public.rs | 10 ++++++++-- backend/src/main.rs | 6 ++++++ migrations/20260110000000_initial_schema.sql | 3 +-- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 753eb742..9f29a962 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -88,9 +88,13 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor return Ok(parsed_ip.to_string()); } } - if let Some(forwarded_for) = req.headers().get("X-Forwarded-For").and_then(|h| h.to_str().ok()) { - if let Some(first_ip) = forwarded_for.split(',').next() { - if let Ok(parsed_ip) = first_ip.trim().parse::() { + if let Some(forwarded_for) = req + .headers() + .get("X-Forwarded-For") + .and_then(|h| h.to_str().ok()) + { + if let Some(last_ip) = forwarded_for.split(',').next_back() { + if let Ok(parsed_ip) = last_ip.trim().parse::() { return Ok(parsed_ip.to_string()); } } @@ -356,7 +360,7 @@ async fn change_password( // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(parsed_user_id) + .bind(parsed_user_id.to_string()) .fetch_optional(&pool) .await .map_err(|e| { @@ -367,7 +371,10 @@ async fn change_password( ) })?; - let user = user.ok_or((StatusCode::NOT_FOUND, "User not found".to_string()))?; + let user = user.ok_or(( + StatusCode::FORBIDDEN, + "Invalid current password".to_string(), + ))?; if !verify_password(&req.current_password, &user.password_hash) { return Err(( @@ -387,7 +394,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) - .bind(parsed_user_id) + .bind(parsed_user_id.to_string()) .execute(&pool) .await .map_err(|e| { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 0ad8557f..51933ee3 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -21,8 +21,14 @@ pub fn router(state: crate::state::AppState) -> Router { Router::new() .route("/health", get(health_check)) - .route("/api/articles", get(list_articles).route_layer(public_governor_layer.clone())) - .route("/api/blog", get(list_blog_posts).route_layer(public_governor_layer)) + .route( + "/api/articles", + get(list_articles).route_layer(public_governor_layer.clone()), + ) + .route( + "/api/blog", + get(list_blog_posts).route_layer(public_governor_layer), + ) .with_state(state) } diff --git a/backend/src/main.rs b/backend/src/main.rs index ebe34841..817ab14f 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -75,6 +75,12 @@ async fn main() -> Result<(), Box> { tracing::warn!("====================================================================="); } + if std::env::var("TRUSTED_PROXY_IPS").is_err() { + tracing::warn!("====================================================================="); + tracing::warn!("WARNING: TRUSTED_PROXY_IPS is not set. 
All users may share a single rate-limit bucket if behind a proxy."); + tracing::warn!("====================================================================="); + } + // Build LeptosOptions from environment/config let site_addr: SocketAddr = std::env::var("LEPTOS_SITE_ADDR") .unwrap_or_else(|_| "0.0.0.0:3000".to_string()) diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 74513d52..eada5822 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,7 +1,6 @@ -- Users (Admin) CREATE TABLE users ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits - id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), + id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) From 72e29021b0151f2f679be6051f845bf0b8d35c9a Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 00:06:07 -0700 Subject: [PATCH 34/64] feat: Generate UUIDs for user IDs, prevent password verification timing attacks, and run Docker containers as a non-root user, while also refactoring governor configuration and improving password input trimming. 
--- backend/src/api/admin.rs | 44 ++++++++++---------- docker-compose.prod.yaml | 1 + hgen/src/main.rs | 2 +- migrations/20260110000000_initial_schema.sql | 3 +- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 9f29a962..0bf07e92 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -93,6 +93,8 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .get("X-Forwarded-For") .and_then(|h| h.to_str().ok()) { + // We pick the rightmost IP (next_back) because trustworthy proxies append client IPs to the end. + // If a proxy replaces the header outright instead of appending, this still yields the correct single IP. if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { return Ok(parsed_ip.to_string()); @@ -123,26 +125,21 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { + let shared_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + let login_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: shared_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: shared_governor_config.clone(), }; let me_governor_layer = tower_governor::GovernorLayer { @@ -371,12 +368,17 @@ async fn change_password( ) })?; - let user = 
user.ok_or(( - StatusCode::FORBIDDEN, - "Invalid current password".to_string(), - ))?; + let (hash_to_verify, is_valid_user) = match user { + Some(ref u) => (u.password_hash.as_str(), true), + None => { + static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; + (DUMMY_HASH, false) + } + }; + + let password_match = verify_password(&req.current_password, hash_to_verify); - if !verify_password(&req.current_password, &user.password_hash) { + if !is_valid_user || !password_match { return Err(( StatusCode::FORBIDDEN, "Invalid current password".to_string(), diff --git a/docker-compose.prod.yaml b/docker-compose.prod.yaml index 7cdde0d5..93b54dd4 100644 --- a/docker-compose.prod.yaml +++ b/docker-compose.prod.yaml @@ -2,6 +2,7 @@ services: portfolio: build: . restart: always + user: "1000:1000" environment: # 4 slashes are intentional for an absolute path: sqlite:////app/data/sqlite.db - DATABASE_URL=sqlite:////app/data/sqlite.db diff --git a/hgen/src/main.rs b/hgen/src/main.rs index db3bd689..ff6b493d 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -13,7 +13,7 @@ use argon2::{ fn main() { let mut password = String::new(); std::io::stdin().read_line(&mut password).expect("Failed to read password"); - let password = password.trim(); + let password = password.trim_end_matches('\n').trim_end_matches('\r'); let salt = SaltString::generate(&mut OsRng); let argon2 = Argon2::default(); let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index eada5822..74513d52 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,6 +1,7 @@ -- Users (Admin) CREATE TABLE users ( - id TEXT PRIMARY KEY, + -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' 
|| lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) From deee4a0938c6072ab34882c4b849c851569ecee2 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 00:16:57 -0700 Subject: [PATCH 35/64] refactor: Refactor admin API rate limiting and dummy hash usage, improve proxy warning logic, and ensure correct data directory permissions during remote builds. --- backend/src/api/admin.rs | 41 +++++++++++++++++++++------------------- backend/src/main.rs | 4 +++- scripts/remote_build.sh | 3 +++ 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 0bf07e92..bac2e9ee 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -21,6 +21,8 @@ use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; use std::sync::OnceLock; +const DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; + #[derive(Serialize, Deserialize)] pub struct Claims { sub: String, @@ -125,21 +127,26 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { - let shared_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - let login_governor_layer = tower_governor::GovernorLayer { - config: shared_governor_config.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let password_governor_layer = 
tower_governor::GovernorLayer { - config: shared_governor_config.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let me_governor_layer = tower_governor::GovernorLayer { @@ -216,7 +223,6 @@ async fn login( // To prevent early-return timing leaks, we always verify a password hash. // If the user doesn't exist, we use a dummy hash. The dummy hash's source // password is irrelevant as it's only used to consume time. - static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; (DUMMY_HASH, false) } }; @@ -348,7 +354,7 @@ async fn change_password( )); } - let parsed_user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { + let _parsed_user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { ( StatusCode::INTERNAL_SERVER_ERROR, "Invalid user ID format in token".to_string(), @@ -357,7 +363,7 @@ async fn change_password( // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(parsed_user_id.to_string()) + .bind(token_data.claims.sub.clone()) .fetch_optional(&pool) .await .map_err(|e| { @@ -370,10 +376,7 @@ async fn change_password( let (hash_to_verify, is_valid_user) = match user { Some(ref u) => (u.password_hash.as_str(), true), - None => { - static DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; - (DUMMY_HASH, false) - } + None => (DUMMY_HASH, false), }; let password_match = verify_password(&req.current_password, hash_to_verify); @@ -396,7 +399,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) - .bind(parsed_user_id.to_string()) + .bind(token_data.claims.sub.clone()) .execute(&pool) .await .map_err(|e| { diff --git a/backend/src/main.rs b/backend/src/main.rs index 817ab14f..2a177009 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -75,7 +75,9 @@ async fn main() -> Result<(), Box> { tracing::warn!("====================================================================="); } - if std::env::var("TRUSTED_PROXY_IPS").is_err() { + if std::env::var("ENVIRONMENT").as_deref() == Ok("production") + && std::env::var("TRUSTED_PROXY_IPS").is_err() + { tracing::warn!("====================================================================="); tracing::warn!("WARNING: TRUSTED_PROXY_IPS is not set. All users may share a single rate-limit bucket if behind a proxy."); tracing::warn!("====================================================================="); diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 09637b33..6c3f199b 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -29,6 +29,7 @@ if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then -t portfolio-chef . echo "Ensuring DB is up for preparation..." + mkdir -p data && chmod 700 data && sudo chown 1000:1000 data sudo docker compose -f compose.prod.yaml up -d db echo "Waiting for DB..." sleep 5 @@ -53,11 +54,13 @@ if [ "$TARGET" = "all" ]; then echo "Building and starting ALL services with BuildKit caching..." sudo DOCKER_BUILDKIT=1 docker compose -f compose.prod.yaml build \ --build-arg BUILDKIT_INLINE_CACHE=1 + mkdir -p data && chmod 700 data && sudo chown 1000:1000 data sudo docker compose -f compose.prod.yaml up -d --remove-orphans elif [ "$TARGET" = "backend" ]; then echo "Building and restarting BACKEND (portfolio) service with caching..." 
sudo DOCKER_BUILDKIT=1 docker compose -f compose.prod.yaml build \ --build-arg BUILDKIT_INLINE_CACHE=1 portfolio + mkdir -p data && chmod 700 data && sudo chown 1000:1000 data sudo docker compose -f compose.prod.yaml up -d --no-deps portfolio elif [ "$TARGET" = "frontend" ]; then echo "Frontend is part of the backend binary in this setup (SSR)." From 1349d5a71ddef04dc676f19f23b204d2b4e60dca Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 00:29:27 -0700 Subject: [PATCH 36/64] feat: Migrate backend to SQLite, enforce production environment security checks, and refine script execution. --- backend/src/api/admin.rs | 8 +++----- backend/src/main.rs | 4 +--- scripts/remote_build.sh | 25 ++----------------------- scripts/setup-dev.sh | 7 +++++-- shared/src/auth.rs | 9 ++++++--- 5 files changed, 17 insertions(+), 36 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index bac2e9ee..87ed0c5e 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -95,8 +95,8 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .get("X-Forwarded-For") .and_then(|h| h.to_str().ok()) { - // We pick the rightmost IP (next_back) because trustworthy proxies append client IPs to the end. - // If a proxy replaces the header outright instead of appending, this still yields the correct single IP. + // We pick the rightmost IP (next_back) because Nginx appends the connecting client's IP to the right of any existing XFF. + // The rightmost entry is the client IP as seen by Nginx. 
if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { return Ok(parsed_ip.to_string()); @@ -241,11 +241,9 @@ async fn login( return Err((StatusCode::UNAUTHORIZED, "Invalid credentials".to_string())); } - let user = user.expect("User should exist when credentials are valid"); - let exp = (Utc::now() + Duration::hours(24)).timestamp() as usize; let claims = Claims { - sub: user.id.clone(), + sub: user.unwrap().id, exp, }; diff --git a/backend/src/main.rs b/backend/src/main.rs index 2a177009..e281bd84 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -78,9 +78,7 @@ async fn main() -> Result<(), Box> { if std::env::var("ENVIRONMENT").as_deref() == Ok("production") && std::env::var("TRUSTED_PROXY_IPS").is_err() { - tracing::warn!("====================================================================="); - tracing::warn!("WARNING: TRUSTED_PROXY_IPS is not set. All users may share a single rate-limit bucket if behind a proxy."); - tracing::warn!("====================================================================="); + panic!("TRUSTED_PROXY_IPS must be set in production. Otherwise, all users behind a proxy will share a single rate-limit bucket."); } // Build LeptosOptions from environment/config diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 6c3f199b..8179458a 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -12,13 +12,10 @@ export COMPOSE_DOCKER_CLI_BUILD=1 # Generate .env file with defaults for production cat < .env -POSTGRES_USER=admin -POSTGRES_PASSWORD=password -POSTGRES_DB=portfolio DOMAIN_NAME=jakewray.dev LEPTOS_SITE_ADDR=0.0.0.0:3000 RUST_LOG=info -DATABASE_URL=postgres://admin:password@db:5432/portfolio +DATABASE_URL=sqlite:////app/data/sqlite.db EOF if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then @@ -28,26 +25,8 @@ if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then --cache-from portfolio-chef:latest \ -t portfolio-chef . 
- echo "Ensuring DB is up for preparation..." + echo "Ensuring data directory exists..." mkdir -p data && chmod 700 data && sudo chown 1000:1000 data - sudo docker compose -f compose.prod.yaml up -d db - echo "Waiting for DB..." - sleep 5 - - echo "Running sqlx prepare on server..." - DB_CONTAINER=$(sudo docker compose -f compose.prod.yaml ps -q db | head -n1) - - # We use the chef image which has sqlx-cli installed, and mount source code - sudo docker run --rm \ - --network container:$DB_CONTAINER \ - -v "$(pwd)":/app \ - -w /app \ - -u root \ - -e DATABASE_URL=postgres://admin:password@localhost:5432/portfolio \ - -e SQLX_OFFLINE=false \ - portfolio-chef \ - cargo sqlx prepare --workspace - sudo chown -R jake-user:jake-user . fi if [ "$TARGET" = "all" ]; then diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 8d0fe311..405b099f 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -5,7 +5,7 @@ set -e echo "🚀 Setting up local development environment..." -if [ "$APP_ENV" = "production" ] || [[ "$DATABASE_URL" == *"/app/data"* ]]; then +if [ "$ENVIRONMENT" = "production" ] || [[ "$DATABASE_URL" == *"/app/data"* ]]; then echo "❌ Error: Production environment detected. Setup script aborted." exit 1 fi @@ -72,7 +72,10 @@ echo "👤 Creating default admin user..." # Anyone reading the repository knows these default credentials. Check that this # dev instance isn't exposed to untrusted networks. # Generate hash dynamically -ADMIN_HASH=$(echo -n "demo-admin-2026!" | (cd hgen && cargo run --quiet 2>/dev/null) | tail -n 1) +if [ ! -x "hgen/target/release/hgen" ]; then + (cd hgen && cargo build --release --quiet) +fi +ADMIN_HASH=$(echo -n "demo-admin-2026!" | ./hgen/target/release/hgen 2>/dev/null | tail -n 1) if ! 
[[ "$ADMIN_HASH" =~ ^\$argon2 ]]; then echo "❌ hgen failed or produced unexpected output" diff --git a/shared/src/auth.rs b/shared/src/auth.rs index ddf0332b..c7004134 100644 --- a/shared/src/auth.rs +++ b/shared/src/auth.rs @@ -10,10 +10,13 @@ pub fn init_jwt_secret() { pub fn get_jwt_secret() -> &'static [u8] { JWT_SECRET.get_or_init(|| { - std::env::var("JWT_SECRET") + let secret = std::env::var("JWT_SECRET") .unwrap_or_else(|_| { panic!("JWT_SECRET environment variable must be set. If this is a frontend/WASM build, the 'ssr' feature may have been incorrectly enabled."); - }) - .into_bytes() + }); + if secret.len() < 32 { + panic!("JWT_SECRET must be at least 32 characters long for security."); + } + secret.into_bytes() }) } From 1077c4571241417cfe13b727f62e135149a9e711 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 02:35:35 -0700 Subject: [PATCH 37/64] feat: Pin Argon2 hashing parameters, refine client IP extraction logic, and introduce new environment variables for remote builds. --- backend/src/api/admin.rs | 38 ++++++++++++++++++++++++++++++-------- scripts/remote_build.sh | 3 +++ test_argon2.rs | 7 +++++++ 3 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 test_argon2.rs diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 87ed0c5e..b1f35118 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -52,10 +52,25 @@ struct UserRow { password_hash: String, } +// Pin Argon2 parameters (m=19456, t=2, p=1) to prevent timing discrepancies if defaults ever change. 
+const ARGON2_M_COST: u32 = 19456; +const ARGON2_T_COST: u32 = 2; +const ARGON2_P_COST: u32 = 1; + +fn get_argon2() -> Argon2<'static> { + let params = argon2::Params::new( + ARGON2_M_COST, + ARGON2_T_COST, + ARGON2_P_COST, + Some(argon2::Params::DEFAULT_OUTPUT_LEN), + ) + .expect("Valid Argon2 parameters"); + Argon2::new(argon2::Algorithm::Argon2id, argon2::Version::V0x13, params) +} + fn hash_password(password: &str) -> Result { let salt = SaltString::generate(&mut OsRng); - let argon2 = Argon2::default(); - argon2 + get_argon2() .hash_password(password.as_bytes(), &salt) .map_err(|e| e.to_string()) .map(|hash| hash.to_string()) @@ -85,18 +100,23 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); if is_trusted_proxy { + // Priority 1: X-Real-IP is checked first. + // Some proxy configurations use X-Real-IP to explicitly pass the client IP, overriding XFF lists. if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { if let Ok(parsed_ip) = real_ip.parse::() { return Ok(parsed_ip.to_string()); } } + // Priority 2: X-Forwarded-For if let Some(forwarded_for) = req .headers() .get("X-Forwarded-For") .and_then(|h| h.to_str().ok()) { - // We pick the rightmost IP (next_back) because Nginx appends the connecting client's IP to the right of any existing XFF. - // The rightmost entry is the client IP as seen by Nginx. + // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration + // uses `proxy_add_x_forwarded_for`, which appends the connecting client's IP to the right. + // NOTE: If intermediate proxies exist between Nginx and this backend that are NOT in TRUSTED_PROXY_IPS, + // the rightmost IP will be the last untrusted proxy's IP, not the true client. 
if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { return Ok(parsed_ip.to_string()); @@ -121,12 +141,14 @@ fn verify_password(password: &str, password_hash: &str) -> bool { Ok(h) => h, Err(_) => return false, }; - Argon2::default() + get_argon2() .verify_password(password.as_bytes(), &parsed_hash) .is_ok() } pub fn router(state: crate::state::AppState) -> Router { + // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. + // Burst windows completely refresh across restarts. let login_governor_layer = tower_governor::GovernorLayer { config: std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() @@ -352,7 +374,7 @@ async fn change_password( )); } - let _parsed_user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { + let user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { ( StatusCode::INTERNAL_SERVER_ERROR, "Invalid user ID format in token".to_string(), @@ -361,7 +383,7 @@ async fn change_password( // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(token_data.claims.sub.clone()) + .bind(user_id.to_string()) .fetch_optional(&pool) .await .map_err(|e| { @@ -397,7 +419,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) - .bind(token_data.claims.sub.clone()) + .bind(user_id.to_string()) .execute(&pool) .await .map_err(|e| { diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 8179458a..82f2d52b 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -16,6 +16,9 @@ DOMAIN_NAME=jakewray.dev LEPTOS_SITE_ADDR=0.0.0.0:3000 RUST_LOG=info DATABASE_URL=sqlite:////app/data/sqlite.db +ENVIRONMENT=production +JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') +TRUSTED_PROXY_IPS=127.0.0.1 EOF if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then diff --git a/test_argon2.rs b/test_argon2.rs new file mode 100644 index 00000000..2e78a4dc --- /dev/null +++ b/test_argon2.rs @@ -0,0 +1,7 @@ +use argon2::{Algorithm, Version, Params, Argon2}; + +fn main() { + let params = Params::new(19456, 2, 1, None).unwrap(); + let argon2 = Argon2::new(Algorithm::Argon2id, Version::V0x13, params); + println!("OK"); +} From 9e783be0e540beab2c377b91843d5655497132ea Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 13:00:32 -0700 Subject: [PATCH 38/64] Configure Argon2 parameters explicitly, improve admin API password validation and shared rate limit configuration, and update development and build scripts. --- backend/src/api/admin.rs | 35 ++++++++++++++++------------------- hgen/src/main.rs | 12 +++++++++++- scripts/remote_build.sh | 2 +- scripts/setup-dev.sh | 7 +++++-- test_argon2.rs | 7 ------- 5 files changed, 33 insertions(+), 30 deletions(-) delete mode 100644 test_argon2.rs diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index b1f35118..5e1499c4 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -149,26 +149,21 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. // Burst windows completely refresh across restarts. 
+ let shared_auth_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + let login_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: shared_auth_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: shared_auth_governor_config.clone(), }; let me_governor_layer = tower_governor::GovernorLayer { @@ -358,10 +353,12 @@ async fn change_password( let req: ChangePasswordRequest = serde_json::from_slice(&bytes) .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; - if req.current_password.is_empty() || req.current_password.len() > 128 { + let current_char_count = req.current_password.chars().count(); + let current_byte_count = req.current_password.len(); + if current_char_count < 12 || current_byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Invalid current password length".to_string(), + "Current password length must be at least 12 characters and no more than 128 bytes (for Argon2 processing).".to_string(), )); } @@ -370,7 +367,7 @@ async fn change_password( if char_count < 12 || byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Password length must be at least 12 characters and max 128 bytes".to_string(), + "New password length must be at least 12 characters and no more than 128 bytes (for Argon2 processing).".to_string(), )); } diff --git a/hgen/src/main.rs b/hgen/src/main.rs index ff6b493d..e8af43d5 100644 --- a/hgen/src/main.rs 
+++ b/hgen/src/main.rs @@ -15,7 +15,17 @@ fn main() { std::io::stdin().read_line(&mut password).expect("Failed to read password"); let password = password.trim_end_matches('\n').trim_end_matches('\r'); let salt = SaltString::generate(&mut OsRng); - let argon2 = Argon2::default(); + let params = argon2::Params::new( + 19456, + 2, + 1, + Some(argon2::Params::DEFAULT_OUTPUT_LEN), + ).unwrap(); + let argon2 = Argon2::new( + argon2::Algorithm::Argon2id, + argon2::Version::V0x13, + params, + ); let hash = argon2.hash_password(password.as_bytes(), &salt).unwrap(); println!("{}", hash); } diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 82f2d52b..37225664 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -18,7 +18,7 @@ RUST_LOG=info DATABASE_URL=sqlite:////app/data/sqlite.db ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') -TRUSTED_PROXY_IPS=127.0.0.1 +TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 EOF if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 405b099f..1ee040b4 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -91,8 +91,11 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES (?1, ?2, ?3) ON CONFLICT (username) DO NOTHING;" \ - "$SAFE_UUID" "admin" "$ADMIN_HASH" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db < Date: Fri, 20 Mar 2026 13:19:53 -0700 Subject: [PATCH 39/64] refactor: Split admin rate limit configurations, relax password length validation, and conditionally generate `.env` in build scripts. 
--- backend/src/api/admin.rs | 20 ++++++++++++++------ scripts/remote_build.sh | 8 ++++++-- scripts/setup-dev.sh | 5 +---- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 5e1499c4..3b1db884 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -149,7 +149,7 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. // Burst windows completely refresh across restarts. - let shared_auth_governor_config = std::sync::Arc::new( + let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) @@ -159,11 +159,20 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: shared_auth_governor_config.clone(), + config: login_governor_config, }; + let password_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + let password_governor_layer = tower_governor::GovernorLayer { - config: shared_auth_governor_config.clone(), + config: password_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -353,12 +362,11 @@ async fn change_password( let req: ChangePasswordRequest = serde_json::from_slice(&bytes) .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; - let current_char_count = req.current_password.chars().count(); let current_byte_count = req.current_password.len(); - if current_char_count < 12 || current_byte_count > 128 { + if current_byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Current password length must be at least 12 characters and 
no more than 128 bytes (for Argon2 processing).".to_string(), + "Current password length must be no more than 128 bytes (for Argon2 processing).".to_string(), )); } diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 37225664..3d29ccfd 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -10,8 +10,9 @@ echo "Remote Build Target: $TARGET" export DOCKER_BUILDKIT=1 export COMPOSE_DOCKER_CLI_BUILD=1 -# Generate .env file with defaults for production -cat < .env +if [ ! -f .env ]; then + echo "Generating new .env file with defaults..." + cat < .env DOMAIN_NAME=jakewray.dev LEPTOS_SITE_ADDR=0.0.0.0:3000 RUST_LOG=info @@ -20,6 +21,9 @@ ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 EOF +else + echo "Using existing .env file." +fi if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then echo "Building chef base image (with cache)..." diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 1ee040b4..b74b1250 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -91,10 +91,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db < Date: Fri, 20 Mar 2026 16:44:52 -0700 Subject: [PATCH 40/64] refactor: centralize Argon2 parameters, enhance password validation and rate limit warnings, and reduce API pagination offsets. 
--- backend/src/api/admin.rs | 16 ++++++---------- backend/src/api/public.rs | 4 ++-- backend/src/main.rs | 16 ++++++++++++---- hgen/Cargo.toml | 1 + hgen/src/main.rs | 6 +++--- scripts/setup-dev.sh | 1 + shared/src/auth.rs | 4 ++++ 7 files changed, 29 insertions(+), 19 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 3b1db884..d03255ca 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -52,16 +52,11 @@ struct UserRow { password_hash: String, } -// Pin Argon2 parameters (m=19456, t=2, p=1) to prevent timing discrepancies if defaults ever change. -const ARGON2_M_COST: u32 = 19456; -const ARGON2_T_COST: u32 = 2; -const ARGON2_P_COST: u32 = 1; - fn get_argon2() -> Argon2<'static> { let params = argon2::Params::new( - ARGON2_M_COST, - ARGON2_T_COST, - ARGON2_P_COST, + shared::auth::ARGON2_M_COST, + shared::auth::ARGON2_T_COST, + shared::auth::ARGON2_P_COST, Some(argon2::Params::DEFAULT_OUTPUT_LEN), ) .expect("Valid Argon2 parameters"); @@ -362,11 +357,12 @@ async fn change_password( let req: ChangePasswordRequest = serde_json::from_slice(&bytes) .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; + let current_char_count = req.current_password.chars().count(); let current_byte_count = req.current_password.len(); - if current_byte_count > 128 { + if current_char_count < 12 || current_byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Current password length must be no more than 128 bytes (for Argon2 processing).".to_string(), + "Current password length must be at least 12 characters and no more than 128 bytes (for Argon2 processing).".to_string(), )); } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 51933ee3..e270713b 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -43,7 +43,7 @@ async fn list_articles( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = 
query.offset.unwrap_or(0).min(100_000); + let offset = query.offset.unwrap_or(0).min(10_000); match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) .bind(offset) @@ -86,7 +86,7 @@ async fn list_blog_posts( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0).min(100_000); + let offset = query.offset.unwrap_or(0).min(10_000); match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) .bind(offset) diff --git a/backend/src/main.rs b/backend/src/main.rs index e281bd84..c5e70d94 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -75,10 +75,18 @@ async fn main() -> Result<(), Box> { tracing::warn!("====================================================================="); } - if std::env::var("ENVIRONMENT").as_deref() == Ok("production") - && std::env::var("TRUSTED_PROXY_IPS").is_err() - { - panic!("TRUSTED_PROXY_IPS must be set in production. Otherwise, all users behind a proxy will share a single rate-limit bucket."); + if std::env::var("ENVIRONMENT").as_deref() == Ok("production") { + match std::env::var("TRUSTED_PROXY_IPS").as_deref() { + Err(_) => panic!("TRUSTED_PROXY_IPS must be set in production. Otherwise, all users behind a proxy will share a single rate-limit bucket."), + Ok("172.18.0.2,172.18.0.3") | Ok("172.18.0.2, 172.18.0.3") => { + tracing::warn!("====================================================================="); + tracing::warn!("WARNING: TRUSTED_PROXY_IPS is set to the default Docker bridge IPs."); + tracing::warn!("Container IPs can change on restart. 
Rate limiting may fail open if these are incorrect."); + tracing::warn!("Please verify these IPs post-deploy or use a more robust mechanism."); + tracing::warn!("====================================================================="); + } + Ok(_) => {} + } } // Build LeptosOptions from environment/config diff --git a/hgen/Cargo.toml b/hgen/Cargo.toml index e64ce7f1..432ba8ae 100644 --- a/hgen/Cargo.toml +++ b/hgen/Cargo.toml @@ -5,3 +5,4 @@ edition = "2021" [dependencies] argon2 = { version = "0.5.0", features = ["std", "password-hash"] } +shared = { path = "../shared", features = ["ssr"] } diff --git a/hgen/src/main.rs b/hgen/src/main.rs index e8af43d5..87e63a96 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -16,9 +16,9 @@ fn main() { let password = password.trim_end_matches('\n').trim_end_matches('\r'); let salt = SaltString::generate(&mut OsRng); let params = argon2::Params::new( - 19456, - 2, - 1, + shared::auth::ARGON2_M_COST, + shared::auth::ARGON2_T_COST, + shared::auth::ARGON2_P_COST, Some(argon2::Params::DEFAULT_OUTPUT_LEN), ).unwrap(); let argon2 = Argon2::new( diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index b74b1250..d4929d0b 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -12,6 +12,7 @@ fi # Check dependencies command -v cargo &> /dev/null || { echo "❌ cargo not found. 
Install Rust from https://rustup.rs/"; exit 1; } +command -v sqlite3 &> /dev/null || { echo "❌ sqlite3 not found"; exit 1; } # Check for container runtime CONTAINER_CMD="" diff --git a/shared/src/auth.rs b/shared/src/auth.rs index c7004134..90a4d2ec 100644 --- a/shared/src/auth.rs +++ b/shared/src/auth.rs @@ -1,5 +1,9 @@ use std::sync::OnceLock; +pub const ARGON2_M_COST: u32 = 19456; +pub const ARGON2_T_COST: u32 = 2; +pub const ARGON2_P_COST: u32 = 1; + static JWT_SECRET: OnceLock> = OnceLock::new(); /// Required initialization: Call early if you want to fail fast on startup, From 357d0bb7b9a60d6b9fcd6420c64c87132d3fbc4d Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Fri, 20 Mar 2026 17:20:41 -0700 Subject: [PATCH 41/64] refactor: Enhance login security by dynamically generating dummy password hashes and adding a password length check, alongside clarifying rate limit comments. --- backend/src/api/admin.rs | 28 ++++++++++++++++++++++------ backend/src/api/public.rs | 3 +++ scripts/setup-dev.sh | 3 ++- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index d03255ca..7cc63439 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -21,7 +21,17 @@ use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; use std::sync::OnceLock; -const DUMMY_HASH: &str = "$argon2id$v=19$m=19456,t=2,p=1$75vBQ9LN4IAiHrViVOPI4w$L1wC8aj0h6PO/I8xVshCOB0TjOa9CTkfx8dIKA/0FVY"; +fn get_dummy_hash() -> &'static str { + static DUMMY_HASH: OnceLock = OnceLock::new(); + DUMMY_HASH.get_or_init(|| { + let password = "dummy-password-that-will-never-match"; + let salt = SaltString::generate(&mut OsRng); + get_argon2() + .hash_password(password.as_bytes(), &salt) + .expect("Failed to generate dummy hash") + .to_string() + }) +} #[derive(Serialize, Deserialize)] pub struct Claims { @@ -110,8 +120,8 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor { // We pick the rightmost IP (next_back) 
under the exact assumption that the trusted Nginx configuration // uses `proxy_add_x_forwarded_for`, which appends the connecting client's IP to the right. - // NOTE: If intermediate proxies exist between Nginx and this backend that are NOT in TRUSTED_PROXY_IPS, - // the rightmost IP will be the last untrusted proxy's IP, not the true client. + // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer + // will put its own IP rightmost, making all traffic share one rate limit bucket. if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { return Ok(parsed_ip.to_string()); @@ -143,7 +153,8 @@ fn verify_password(password: &str, password_hash: &str) -> bool { pub fn router(state: crate::state::AppState) -> Router { // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. - // Burst windows completely refresh across restarts. + // Burst windows completely refresh across restarts. Therefore, the effective rate limiting + // window ONLY covers uptime, not absolute calendar time. let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) @@ -225,6 +236,11 @@ async fn login( )); }; + // Prevent extremely long passwords from exhausting Argon2 CPU time. + if req.password.len() > 128 { + return Err((StatusCode::BAD_REQUEST, "Password too long".to_string())); + } + let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE username = ?") .bind(&req.username) @@ -244,7 +260,7 @@ async fn login( // To prevent early-return timing leaks, we always verify a password hash. // If the user doesn't exist, we use a dummy hash. The dummy hash's source // password is irrelevant as it's only used to consume time. 
- (DUMMY_HASH, false) + (get_dummy_hash(), false) } }; @@ -397,7 +413,7 @@ async fn change_password( let (hash_to_verify, is_valid_user) = match user { Some(ref u) => (u.password_hash.as_str(), true), - None => (DUMMY_HASH, false), + None => (get_dummy_hash(), false), }; let password_match = verify_password(&req.current_password, hash_to_verify); diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index e270713b..60a850b9 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -19,6 +19,9 @@ pub fn router(state: crate::state::AppState) -> Router { ), }; + // NOTE: Cloning `GovernorLayer` clones the underlying `Arc`. + // Both `/api/articles` and `/api/blog` share the exact same rate limit bucket per IP. + // Bursting one endpoint will exhaust the quota for the other. This is intentional. Router::new() .route("/health", get(health_check)) .route( diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index d4929d0b..e631fd4b 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,7 +92,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db -cmd "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ADMIN_HASH') ON CONFLICT (username) DO NOTHING;" <<'EOF' || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db < Date: Fri, 20 Mar 2026 17:42:18 -0700 Subject: [PATCH 42/64] feat: Optimize Argon2 initialization with `OnceLock`, fix SQLite admin password hash escaping, and refine admin API endpoints. 
--- backend/src/api/admin.rs | 33 ++++++++++++++++++--------------- scripts/remote_build.sh | 2 ++ scripts/setup-dev.sh | 5 ++--- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 7cc63439..a7be60df 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -62,15 +62,18 @@ struct UserRow { password_hash: String, } -fn get_argon2() -> Argon2<'static> { - let params = argon2::Params::new( - shared::auth::ARGON2_M_COST, - shared::auth::ARGON2_T_COST, - shared::auth::ARGON2_P_COST, - Some(argon2::Params::DEFAULT_OUTPUT_LEN), - ) - .expect("Valid Argon2 parameters"); - Argon2::new(argon2::Algorithm::Argon2id, argon2::Version::V0x13, params) +fn get_argon2() -> &'static Argon2<'static> { + static ARGON2: OnceLock> = OnceLock::new(); + ARGON2.get_or_init(|| { + let params = argon2::Params::new( + shared::auth::ARGON2_M_COST, + shared::auth::ARGON2_T_COST, + shared::auth::ARGON2_P_COST, + Some(argon2::Params::DEFAULT_OUTPUT_LEN), + ) + .expect("Valid Argon2 parameters"); + Argon2::new(argon2::Algorithm::Argon2id, argon2::Version::V0x13, params) + }) } fn hash_password(password: &str) -> Result { @@ -155,6 +158,7 @@ pub fn router(state: crate::state::AppState) -> Router { // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. // Burst windows completely refresh across restarts. Therefore, the effective rate limiting // window ONLY covers uptime, not absolute calendar time. + tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) @@ -319,7 +323,7 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); - let token_data = jsonwebtoken::decode::( + let _token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, @@ -327,8 +331,7 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { .map_err(|_| StatusCode::UNAUTHORIZED)?; Ok(Json(serde_json::json!({ - "authenticated": true, - "sub": token_data.claims.sub + "authenticated": true }))) } @@ -373,12 +376,12 @@ async fn change_password( let req: ChangePasswordRequest = serde_json::from_slice(&bytes) .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; - let current_char_count = req.current_password.chars().count(); let current_byte_count = req.current_password.len(); - if current_char_count < 12 || current_byte_count > 128 { + if current_byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "Current password length must be at least 12 characters and no more than 128 bytes (for Argon2 processing).".to_string(), + "Current password length must be no more than 128 bytes (for Argon2 processing)." + .to_string(), )); } diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 3d29ccfd..d45c56a3 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -19,6 +19,8 @@ RUST_LOG=info DATABASE_URL=sqlite:////app/data/sqlite.db ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') +# Warning: Ephemeral Docker Bridge IPs change on restart. +# In production, use the Docker network name resolved at startup, or assign fixed IPs with --ip. 
TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 EOF else diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index e631fd4b..ed8ee84d 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,9 +92,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db < Date: Fri, 20 Mar 2026 20:01:06 -0700 Subject: [PATCH 43/64] feat: Enhance security by using parameterized queries for admin setup and validating proxy IPs, improve logging for client IP extraction and JWT errors, and refine password newline stripping. --- backend/src/api/admin.rs | 22 +++++++++++++++++----- backend/src/main.rs | 1 + hgen/src/main.rs | 2 +- scripts/setup-dev.sh | 3 ++- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index a7be60df..78196ac5 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -127,6 +127,7 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor // will put its own IP rightmost, making all traffic share one rate limit bucket. if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { + tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); return Ok(parsed_ip.to_string()); } } @@ -155,9 +156,11 @@ fn verify_password(password: &str, password_hash: &str) -> bool { } pub fn router(state: crate::state::AppState) -> Router { - // NOTE: tower_governor uses in-memory state. A server restart will reset all rate limit counters. + // KNOWN LIMITATION: tower_governor uses in-memory state. A server restart will reset all rate limit counters. // Burst windows completely refresh across restarts. Therefore, the effective rate limiting - // window ONLY covers uptime, not absolute calendar time. + // window ONLY covers uptime, not absolute calendar time. 
An attacker who can trigger or observe + // restarts could reset their login throttle window. For a low-traffic personal site, this is an + // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. Frequent restarts may bypass burst limits."); let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() @@ -315,7 +318,10 @@ async fn login( } } -async fn me(headers: HeaderMap) -> Result, StatusCode> { +async fn me( + headers: HeaderMap, + peer_info: Option>, +) -> Result, StatusCode> { let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -328,7 +334,13 @@ async fn me(headers: HeaderMap) -> Result, StatusCode> { &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, ) - .map_err(|_| StatusCode::UNAUTHORIZED)?; + .map_err(|e| { + let ip = peer_info + .map(|ci| ci.0.ip().to_string()) + .unwrap_or_else(|| "unknown".to_string()); + tracing::warn!("Invalid token on /me from {}: {}", ip, e); + StatusCode::UNAUTHORIZED + })?; Ok(Json(serde_json::json!({ "authenticated": true @@ -390,7 +402,7 @@ async fn change_password( if char_count < 12 || byte_count > 128 { return Err(( StatusCode::BAD_REQUEST, - "New password length must be at least 12 characters and no more than 128 bytes (for Argon2 processing).".to_string(), + "New password length must be at least 12 characters and no more than 128 bytes (policy limit).".to_string(), )); } diff --git a/backend/src/main.rs b/backend/src/main.rs index c5e70d94..b4d4c815 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -78,6 +78,7 @@ async fn main() -> Result<(), Box> { if std::env::var("ENVIRONMENT").as_deref() == Ok("production") { match std::env::var("TRUSTED_PROXY_IPS").as_deref() { Err(_) => panic!("TRUSTED_PROXY_IPS must be set in production. 
Otherwise, all users behind a proxy will share a single rate-limit bucket."), + Ok(ips) if ips.trim().is_empty() => panic!("TRUSTED_PROXY_IPS is set but empty. This will cause all proxies to be untrusted, collapsing rate limits."), Ok("172.18.0.2,172.18.0.3") | Ok("172.18.0.2, 172.18.0.3") => { tracing::warn!("====================================================================="); tracing::warn!("WARNING: TRUSTED_PROXY_IPS is set to the default Docker bridge IPs."); diff --git a/hgen/src/main.rs b/hgen/src/main.rs index 87e63a96..209592de 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -13,7 +13,7 @@ use argon2::{ fn main() { let mut password = String::new(); std::io::stdin().read_line(&mut password).expect("Failed to read password"); - let password = password.trim_end_matches('\n').trim_end_matches('\r'); + let password = password.strip_suffix("\r\n").or_else(|| password.strip_suffix("\n")).unwrap_or(&password); let salt = SaltString::generate(&mut OsRng); let params = argon2::Params::new( shared::auth::ARGON2_M_COST, diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index ed8ee84d..466bb912 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -93,7 +93,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F fi ESCAPED_HASH="${ADMIN_HASH//\'/\'\'}" -sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ESCAPED_HASH') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db -cmd ".param set @id '$SAFE_UUID'" -cmd ".param set @hash '$ESCAPED_HASH'" \ + "INSERT INTO users (id, username, password_hash) VALUES (@id, 'admin', @hash) ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" 
From 9206866502bce1cf0d951e3a1d9afecbc1ef856f Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Mon, 23 Mar 2026 13:11:41 -0700 Subject: [PATCH 44/64] fix: quote rsync destination path in deploy script --- scripts/deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/deploy.sh b/scripts/deploy.sh index 0f1c7ac7..aee3dca8 100755 --- a/scripts/deploy.sh +++ b/scripts/deploy.sh @@ -48,7 +48,7 @@ rsync -avz --info=progress2 \ --exclude '.DS_Store' \ -e "ssh -i ~/.ssh/google_compute_engine -o StrictHostKeyChecking=no" \ ./ \ - jake-user@$IP:~/app/ + "jake-user@$IP:~/app/" # 2. SSH and Deploy echo "Starting remote configuration and build..." From 366dd8ec56c86cb1de15c8a111fdc2c217d7fa7a Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Mon, 23 Mar 2026 13:32:39 -0700 Subject: [PATCH 45/64] feat: Include user ID in login response, harden password verification against timing attacks, and refine SQLite connection pool configuration. --- backend/src/api/admin.rs | 8 +++++--- backend/src/main.rs | 5 +++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 78196ac5..aca3348a 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -271,7 +271,7 @@ async fn login( } }; - let password_match = verify_password(&req.password, hash_to_verify); + let password_match = std::hint::black_box(verify_password(&req.password, hash_to_verify)); let is_invalid = !is_valid_user || !password_match; if is_invalid { @@ -343,7 +343,8 @@ async fn me( })?; Ok(Json(serde_json::json!({ - "authenticated": true + "authenticated": true, + "user_id": _token_data.claims.sub }))) } @@ -431,7 +432,8 @@ async fn change_password( None => (get_dummy_hash(), false), }; - let password_match = verify_password(&req.current_password, hash_to_verify); + let password_match = + std::hint::black_box(verify_password(&req.current_password, hash_to_verify)); if !is_valid_user || !password_match { return Err(( diff 
--git a/backend/src/main.rs b/backend/src/main.rs index b4d4c815..8dce6269 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -49,8 +49,13 @@ async fn main() -> Result<(), Box> { .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) .busy_timeout(std::time::Duration::from_secs(5)); + // With WAL mode, SQLite allows concurrent readers, but all writers are still + // serialized with a single write lock. Setting max_connections(5) helps with concurrent + // reads. We explicitly set min_connections(1) to reflect the serialized write constraint, + // though the pool handles queuing writes against each other up to the busy timeout. let pool = SqlitePoolOptions::new() .max_connections(5) + .min_connections(1) .connect_with(connect_options) .await .map_err(|e| format!("Failed to create database pool: {}", e))?; From 8750da8d36731f0c3849118238dbbbe5c2d63ea7 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Mon, 23 Mar 2026 17:19:09 -0700 Subject: [PATCH 46/64] refactor: Enhance image data URL validation, streamline schema UUID generation comments, and improve backend IP parsing and proxy handling. --- Cargo.toml | 1 + backend/src/api/admin.rs | 29 +++++++++++++++++--- backend/src/main.rs | 4 +-- frontend/src/pages/sections.rs | 5 +++- migrations/20260110000000_initial_schema.sql | 18 +++++++----- 5 files changed, 43 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d08ad214..b12e1081 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = ["frontend", "backend", "shared"] +# 'hgen' is a standalone utility excluded from the workspace so it doesn't build by default. 
exclude = ["hgen"] [workspace.dependencies] diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index aca3348a..77ec1d44 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -101,7 +101,23 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor std::env::var("TRUSTED_PROXY_IPS") .unwrap_or_default() .split(',') - .filter_map(|s| s.trim().parse().ok()) + .filter_map(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + return None; + } + match trimmed.parse() { + Ok(ip) => Some(ip), + Err(e) => { + tracing::warn!( + "Invalid IP address in TRUSTED_PROXY_IPS '{}': {}", + trimmed, + e + ); + None + } + } + }) .collect() }); @@ -122,7 +138,8 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .and_then(|h| h.to_str().ok()) { // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration - // uses `proxy_add_x_forwarded_for`, which appends the connecting client's IP to the right. + // uses `proxy_add_x_forwarded_for`, which appends the connecting peer's IP (the hop right before Nginx) to the right. + // We pick the rightmost IP because that is the most trusted hop added by our reverse proxy, preventing client-side spoofing. // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer // will put its own IP rightmost, making all traffic share one rate limit bucket. if let Some(last_ip) = forwarded_for.split(',').next_back() { @@ -322,6 +339,10 @@ async fn me( headers: HeaderMap, peer_info: Option>, ) -> Result, StatusCode> { + // Design Note: The /me endpoint validates the JWT cryptographically but does not query the database. + // This means a deleted user's JWT remains valid until expiration (24h). For a single-admin personal site, + // this is an acceptable performance trade-off. `change_password` does perform a DB lookup. 
+ let token = headers .get("Authorization") .and_then(|h| h.to_str().ok()) @@ -329,7 +350,7 @@ async fn me( .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); - let _token_data = jsonwebtoken::decode::( + let token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, @@ -344,7 +365,7 @@ async fn me( Ok(Json(serde_json::json!({ "authenticated": true, - "user_id": _token_data.claims.sub + "user_id": token_data.claims.sub }))) } diff --git a/backend/src/main.rs b/backend/src/main.rs index 8dce6269..0f2f9986 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -51,8 +51,8 @@ async fn main() -> Result<(), Box> { // With WAL mode, SQLite allows concurrent readers, but all writers are still // serialized with a single write lock. Setting max_connections(5) helps with concurrent - // reads. We explicitly set min_connections(1) to reflect the serialized write constraint, - // though the pool handles queuing writes against each other up to the busy timeout. + // reads. We explicitly set min_connections(1) to keep one connection warm + // to avoid cold-start latency. 
let pool = SqlitePoolOptions::new() .max_connections(5) .min_connections(1) diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 094a0b83..509dda01 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -222,7 +222,10 @@ fn linkify_images(html: &str) -> String { if let Some(src_url) = src { let is_safe_scheme = src_url.starts_with("http://") || src_url.starts_with("https://") - || src_url.starts_with("data:image/") + || src_url.starts_with("data:image/png") + || src_url.starts_with("data:image/jpeg") + || src_url.starts_with("data:image/gif") + || src_url.starts_with("data:image/webp") || src_url.starts_with('/'); if is_safe_scheme { diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 74513d52..264d7439 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -1,6 +1,10 @@ +-- MACRO: UUID_V4_GENERATOR +-- Expression: (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))) +-- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits. 
+ -- Users (Admin) CREATE TABLE users ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), username TEXT NOT NULL UNIQUE, password_hash TEXT NOT NULL, @@ -9,7 +13,7 @@ CREATE TABLE users ( -- Articles (Journalism - Imported/Synced) CREATE TABLE articles ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), wp_id BIGINT UNIQUE, -- External ID from WordPress slug TEXT NOT NULL UNIQUE, @@ -27,7 +31,7 @@ CREATE TABLE articles ( -- Personal Blog Posts CREATE TABLE blog_posts ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -40,7 +44,7 @@ CREATE TABLE blog_posts ( -- Creative Writing (Stories, Novels, Poetry) CREATE TABLE creative_works ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || 
'-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, @@ -56,7 +60,7 @@ CREATE TABLE creative_works ( CREATE TABLE media_items ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT, description TEXT, @@ -70,7 +74,7 @@ CREATE TABLE media_items ( -- Music CREATE TABLE music_tracks ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), title TEXT NOT NULL, description TEXT, @@ -82,7 +86,7 @@ CREATE TABLE music_tracks ( -- Programming Projects CREATE TABLE projects ( - -- Note: (random() & 3) + 1 provides perfectly uniform UUID variant bits + -- Uses MACRO: UUID_V4_GENERATOR id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), name TEXT NOT NULL, description TEXT, From 2caeb7a969b8b612c2356673a476e8343fadc8c7 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Mon, 23 Mar 2026 17:29:07 -0700 Subject: [PATCH 47/64] refactor: enhance security with XSS protection and IP header warnings, improve database query robustness, and simplify schema. 
--- .gitignore | 3 +- backend/src/api/admin.rs | 53 +++++++++++--------- backend/src/api/public.rs | 34 ++++++------- frontend/src/pages/sections.rs | 7 ++- migrations/20260110000000_initial_schema.sql | 6 +-- 5 files changed, 56 insertions(+), 47 deletions(-) diff --git a/.gitignore b/.gitignore index 3c7f9a3f..6ab4583f 100644 --- a/.gitignore +++ b/.gitignore @@ -70,5 +70,4 @@ imports/ .sass-cache # Hgen -hgen/ -hgen/* +hgen/target/ diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 77ec1d44..7d26f3f1 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -125,7 +125,9 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor if is_trusted_proxy { // Priority 1: X-Real-IP is checked first. - // Some proxy configurations use X-Real-IP to explicitly pass the client IP, overriding XFF lists. + // WARNING: If Nginx is used, it MUST explicitly strip or override this header from the client + // using `proxy_set_header X-Real-IP $remote_addr;`. If it does not, a client behind the + // trusted proxy can easily spoof their IP bypassing the rate limiter. if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { if let Ok(parsed_ip) = real_ip.parse::() { return Ok(parsed_ip.to_string()); @@ -177,7 +179,8 @@ pub fn router(state: crate::state::AppState) -> Router { // Burst windows completely refresh across restarts. Therefore, the effective rate limiting // window ONLY covers uptime, not absolute calendar time. An attacker who can trigger or observe // restarts could reset their login throttle window. For a low-traffic personal site, this is an - // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. + // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended + // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. 
Warning: In-memory rate limiter state resets on restart. Frequent restarts may bypass burst limits."); let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() @@ -288,7 +291,7 @@ async fn login( } }; - let password_match = std::hint::black_box(verify_password(&req.password, hash_to_verify)); + let password_match = verify_password(&req.password, hash_to_verify); let is_invalid = !is_valid_user || !password_match; if is_invalid { @@ -428,33 +431,37 @@ async fn change_password( )); } - let user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|_| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - "Invalid user ID format in token".to_string(), - ) - })?; + let user_id_res = uuid::Uuid::parse_str(&token_data.claims.sub); // Verify current password - let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(user_id.to_string()) - .fetch_optional(&pool) - .await - .map_err(|e| { - tracing::error!("Database error fetching user for password change: {}", e); - ( - StatusCode::INTERNAL_SERVER_ERROR, - "Database error".to_string(), - ) - })?; + let user: Option = match &user_id_res { + Ok(id) => sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") + .bind(id.to_string()) + .fetch_optional(&pool) + .await + .map_err(|e| { + tracing::error!("Database error fetching user for password change: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Database error".to_string(), + ) + })?, + Err(_) => None, + }; let (hash_to_verify, is_valid_user) = match user { Some(ref u) => (u.password_hash.as_str(), true), None => (get_dummy_hash(), false), }; - let password_match = - std::hint::black_box(verify_password(&req.current_password, hash_to_verify)); + let password_match = verify_password(&req.current_password, hash_to_verify); + + if user_id_res.is_err() { + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + "Invalid user ID format in token".to_string(), + )); + } if !is_valid_user 
|| !password_match { return Err(( @@ -474,7 +481,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? WHERE id = ?") .bind(new_hash) - .bind(user_id.to_string()) + .bind(user_id_res.unwrap().to_string()) .execute(&pool) .await .map_err(|e| { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 60a850b9..f914c108 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -51,25 +51,25 @@ async fn list_articles( .bind(limit) .bind(offset) .try_map(|row: sqlx::sqlite::SqliteRow| { - let origin_str: String = row.get("origin"); + let origin_str: String = row.try_get("origin")?; let origin = match origin_str.as_str() { "imported" => shared::Origin::Imported, "synced" => shared::Origin::Synced, _ => shared::Origin::Local, }; - let id_str: String = row.get("id"); + let id_str: String = row.try_get("id")?; let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; Ok(Article { id, - wp_id: row.get("wp_id"), - slug: row.get("slug"), - title: row.get("title"), - subtitle: row.get("subtitle"), - excerpt: row.get("excerpt"), - content: row.get("content"), - cover_image_url: row.get("cover_image_url"), - author: row.get("author"), - published_at: row.get("published_at"), + wp_id: row.try_get("wp_id")?, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + subtitle: row.try_get("subtitle")?, + excerpt: row.try_get("excerpt")?, + content: row.try_get("content")?, + cover_image_url: row.try_get("cover_image_url")?, + author: row.try_get("author")?, + published_at: row.try_get("published_at")?, origin, }) }) @@ -94,7 +94,7 @@ async fn list_blog_posts( .bind(limit) .bind(offset) .try_map(|row: sqlx::sqlite::SqliteRow| { - let tags_str: Option = row.get("tags"); + let tags_str: Option = row.try_get("tags")?; let tags = match tags_str { Some(s) => match serde_json::from_str(&s) { Ok(t) => Some(t), @@ -102,14 +102,14 @@ async fn list_blog_posts( }, None => None, }; - let id_str: String = row.get("id"); 
+ let id_str: String = row.try_get("id")?; let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; Ok(BlogPost { id, - slug: row.get("slug"), - title: row.get("title"), - content: row.get("content"), - published_at: row.get("published_at"), + slug: row.try_get("slug")?, + title: row.try_get("title")?, + content: row.try_get("content")?, + published_at: row.try_get("published_at")?, tags, }) }) diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 509dda01..d3918ffc 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -229,9 +229,14 @@ fn linkify_images(html: &str) -> String { || src_url.starts_with('/'); if is_safe_scheme { + let safe_url = src_url + .replace("&", "&") + .replace("\"", """) + .replace("<", "<") + .replace(">", ">"); let wrapper_start = format!( "", - src_url + safe_url ); let wrapper_end = ""; diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 264d7439..2b7f5cc6 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -25,8 +25,7 @@ CREATE TABLE articles ( author TEXT NOT NULL, published_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), origin TEXT NOT NULL DEFAULT 'local', -- 'imported', 'synced', 'local' - created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), - updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) + created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Personal Blog Posts @@ -38,8 +37,7 @@ CREATE TABLE blog_posts ( content TEXT NOT NULL, -- Markdown/Rich Text published_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), tags TEXT, -- JSON Array - created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), - updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) + created_at DATETIME NOT 
NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); -- Creative Writing (Stories, Novels, Poetry) From abd6b736c8295783f159ce729308384005344caa Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 01:30:53 -0700 Subject: [PATCH 48/64] refactor: Refine trusted proxy IP extraction, enforce pagination offset limits, improve admin token error handling, and expand CI checks for `hgen`. --- .github/workflows/ci.yml | 9 ++++++++- backend/src/api/admin.rs | 19 +++++-------------- backend/src/api/public.rs | 10 ++++++++-- scripts/remote_build.sh | 5 +++++ 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 19b6f1f3..5637a65c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,8 @@ jobs: components: rustfmt - name: Run cargo fmt run: cargo fmt --all -- --check - + - name: Run cargo fmt for hgen + run: cd hgen && cargo fmt --all -- --check clippy: name: Lint with Clippy runs-on: ubuntu-latest @@ -33,6 +34,8 @@ jobs: components: clippy - name: Run cargo clippy run: cargo clippy --workspace --all-targets --all-features -- -D warnings + - name: Run cargo clippy for hgen + run: cd hgen && cargo clippy --all-targets --all-features -- -D warnings test: name: Run Tests @@ -45,3 +48,7 @@ jobs: uses: taiki-e/install-action@nextest - name: Run tests with nextest run: cargo nextest run --workspace --all-features + - name: Check hgen compilation + run: cd hgen && cargo check --all-targets --all-features + - name: Run tests for hgen + run: cd hgen && cargo test diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 7d26f3f1..9d121580 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -124,16 +124,7 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); if is_trusted_proxy { - // Priority 1: X-Real-IP is checked first. 
- // WARNING: If Nginx is used, it MUST explicitly strip or override this header from the client - // using `proxy_set_header X-Real-IP $remote_addr;`. If it does not, a client behind the - // trusted proxy can easily spoof their IP bypassing the rate limiter. - if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { - if let Ok(parsed_ip) = real_ip.parse::() { - return Ok(parsed_ip.to_string()); - } - } - // Priority 2: X-Forwarded-For + // Priority 1: X-Forwarded-For if let Some(forwarded_for) = req .headers() .get("X-Forwarded-For") @@ -157,9 +148,9 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor ); } - peer_ip + Ok(peer_ip .map(|ip| ip.to_string()) - .ok_or(tower_governor::GovernorError::UnableToExtractKey) + .unwrap_or_else(|| "unknown".to_string())) } } @@ -458,8 +449,8 @@ async fn change_password( if user_id_res.is_err() { return Err(( - StatusCode::INTERNAL_SERVER_ERROR, - "Invalid user ID format in token".to_string(), + StatusCode::UNAUTHORIZED, + "Invalid token".to_string(), )); } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index f914c108..57574065 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -46,7 +46,10 @@ async fn list_articles( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0).min(10_000); + let offset = query.offset.unwrap_or(0); + if offset > 10_000 { + return Err(axum::http::StatusCode::BAD_REQUEST); + } match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? 
OFFSET ?") .bind(limit) .bind(offset) @@ -89,7 +92,10 @@ async fn list_blog_posts( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0).min(10_000); + let offset = query.offset.unwrap_or(0); + if offset > 10_000 { + return Err(axum::http::StatusCode::BAD_REQUEST); + } match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) .bind(offset) diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index d45c56a3..bf55a52d 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -58,3 +58,8 @@ else echo "Unknown target: $TARGET" exit 1 fi + +echo "=====================================================================" +echo "WARNING: Check your .env file for TRUSTED_PROXY_IPS." +echo "Docker bridge IPs may change. Verify them if using rate limiting!" +echo "=====================================================================" From 72e6b00a0124b2c8dbcec878fcde3a91d69e6b2e Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 02:18:21 -0700 Subject: [PATCH 49/64] feat: Implement cursor-based pagination for articles, enhance trusted proxy IP detection and rate limit client IP extraction, and update article image placeholders to use inline SVGs. 
--- backend/src/api/admin.rs | 35 +++---- backend/src/api/public.rs | 186 ++++++++++++++++++++++----------- backend/src/main.rs | 27 +++-- frontend/src/pages/sections.rs | 15 ++- 4 files changed, 174 insertions(+), 89 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 9d121580..e6365e69 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -124,7 +124,14 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); if is_trusted_proxy { - // Priority 1: X-Forwarded-For + // Priority 1: X-Real-IP + if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { + if let Ok(parsed_ip) = real_ip.trim().parse::() { + return Ok(parsed_ip.to_string()); + } + } + + // Priority 2: X-Forwarded-For if let Some(forwarded_for) = req .headers() .get("X-Forwarded-For") @@ -137,7 +144,11 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor // will put its own IP rightmost, making all traffic share one rate limit bucket. if let Some(last_ip) = forwarded_for.split(',').next_back() { if let Ok(parsed_ip) = last_ip.trim().parse::() { - tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); + if Some(parsed_ip) == peer_ip { + tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); + } else { + tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. 
Cloudflare) may cause all clients to share this IP.", parsed_ip); + } return Ok(parsed_ip.to_string()); } } @@ -173,7 +184,7 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. Frequent restarts may bypass burst limits."); - let login_governor_config = std::sync::Arc::new( + let auth_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(TrustedProxyIpKeyExtractor) .per_second(1) @@ -183,20 +194,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: login_governor_config, + config: auth_governor_config.clone(), }; - let password_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - let password_governor_layer = tower_governor::GovernorLayer { - config: password_governor_config, + config: auth_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -448,10 +450,7 @@ async fn change_password( let password_match = verify_password(&req.current_password, hash_to_verify); if user_id_res.is_err() { - return Err(( - StatusCode::UNAUTHORIZED, - "Invalid token".to_string(), - )); + return Err((StatusCode::UNAUTHORIZED, "Invalid token".to_string())); } if !is_valid_user || !password_match { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 57574065..ede196e0 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -6,6 +6,7 @@ use sqlx::SqlitePool; pub struct Pagination { pub limit: Option, pub offset: Option, + pub before: Option, } 
pub fn router(state: crate::state::AppState) -> Router { let public_governor_layer = tower_governor::GovernorLayer { @@ -46,39 +47,72 @@ async fn list_articles( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0); - if offset > 10_000 { - return Err(axum::http::StatusCode::BAD_REQUEST); - } - match sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? OFFSET ?") - .bind(limit) - .bind(offset) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let origin_str: String = row.try_get("origin")?; - let origin = match origin_str.as_str() { - "imported" => shared::Origin::Imported, - "synced" => shared::Origin::Synced, - _ => shared::Origin::Local, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(Article { - id, - wp_id: row.try_get("wp_id")?, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - subtitle: row.try_get("subtitle")?, - excerpt: row.try_get("excerpt")?, - content: row.try_get("content")?, - cover_image_url: row.try_get("cover_image_url")?, - author: row.try_get("author")?, - published_at: row.try_get("published_at")?, - origin, + + let rows_res = if let Some(before) = query.before { + sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? 
ORDER BY published_at DESC LIMIT ?") + .bind(before) + .bind(limit) + .try_map(|row: sqlx::sqlite::SqliteRow| { + let origin_str: String = row.try_get("origin")?; + let origin = match origin_str.as_str() { + "imported" => shared::Origin::Imported, + "synced" => shared::Origin::Synced, + _ => shared::Origin::Local, + }; + let id_str: String = row.try_get("id")?; + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(Article { + id, + wp_id: row.try_get("wp_id")?, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + subtitle: row.try_get("subtitle")?, + excerpt: row.try_get("excerpt")?, + content: row.try_get("content")?, + cover_image_url: row.try_get("cover_image_url")?, + author: row.try_get("author")?, + published_at: row.try_get("published_at")?, + origin, + }) + }) + .fetch_all(&pool) + .await + } else { + let offset = query.offset.unwrap_or(0); + if offset > 10_000 { + return Err(axum::http::StatusCode::BAD_REQUEST); + } + sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? 
OFFSET ?") + .bind(limit) + .bind(offset) + .try_map(|row: sqlx::sqlite::SqliteRow| { + let origin_str: String = row.try_get("origin")?; + let origin = match origin_str.as_str() { + "imported" => shared::Origin::Imported, + "synced" => shared::Origin::Synced, + _ => shared::Origin::Local, + }; + let id_str: String = row.try_get("id")?; + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(Article { + id, + wp_id: row.try_get("wp_id")?, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + subtitle: row.try_get("subtitle")?, + excerpt: row.try_get("excerpt")?, + content: row.try_get("content")?, + cover_image_url: row.try_get("cover_image_url")?, + author: row.try_get("author")?, + published_at: row.try_get("published_at")?, + origin, + }) }) - }) - .fetch_all(&pool) - .await - { + .fetch_all(&pool) + .await + }; + + match rows_res { Ok(articles) => Ok(Json(articles)), Err(e) => { tracing::error!("Failed to fetch articles: {}", e); @@ -92,36 +126,66 @@ async fn list_blog_posts( Query(query): Query, ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); - let offset = query.offset.unwrap_or(0); - if offset > 10_000 { - return Err(axum::http::StatusCode::BAD_REQUEST); - } - match sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? 
OFFSET ?") - .bind(limit) - .bind(offset) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let tags_str: Option = row.try_get("tags")?; - let tags = match tags_str { - Some(s) => match serde_json::from_str(&s) { - Ok(t) => Some(t), - Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), - }, - None => None, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(BlogPost { - id, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - content: row.try_get("content")?, - published_at: row.try_get("published_at")?, - tags, + + let rows_res = if let Some(before) = query.before { + sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") + .bind(before) + .bind(limit) + .try_map(|row: sqlx::sqlite::SqliteRow| { + let tags_str: Option = row.try_get("tags")?; + let tags = match tags_str { + Some(s) => match serde_json::from_str(&s) { + Ok(t) => Some(t), + Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), + }, + None => None, + }; + let id_str: String = row.try_get("id")?; + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(BlogPost { + id, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + content: row.try_get("content")?, + published_at: row.try_get("published_at")?, + tags, + }) + }) + .fetch_all(&pool) + .await + } else { + let offset = query.offset.unwrap_or(0); + if offset > 10_000 { + return Err(axum::http::StatusCode::BAD_REQUEST); + } + sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? 
OFFSET ?") + .bind(limit) + .bind(offset) + .try_map(|row: sqlx::sqlite::SqliteRow| { + let tags_str: Option = row.try_get("tags")?; + let tags = match tags_str { + Some(s) => match serde_json::from_str(&s) { + Ok(t) => Some(t), + Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), + }, + None => None, + }; + let id_str: String = row.try_get("id")?; + let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(BlogPost { + id, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + content: row.try_get("content")?, + published_at: row.try_get("published_at")?, + tags, + }) }) - }) - .fetch_all(&pool) - .await - { + .fetch_all(&pool) + .await + }; + + match rows_res { Ok(posts) => Ok(Json(posts)), Err(e) => { tracing::error!("Failed to fetch blog posts: {}", e); diff --git a/backend/src/main.rs b/backend/src/main.rs index 0f2f9986..4aa0686e 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -84,14 +84,27 @@ async fn main() -> Result<(), Box> { match std::env::var("TRUSTED_PROXY_IPS").as_deref() { Err(_) => panic!("TRUSTED_PROXY_IPS must be set in production. Otherwise, all users behind a proxy will share a single rate-limit bucket."), Ok(ips) if ips.trim().is_empty() => panic!("TRUSTED_PROXY_IPS is set but empty. This will cause all proxies to be untrusted, collapsing rate limits."), - Ok("172.18.0.2,172.18.0.3") | Ok("172.18.0.2, 172.18.0.3") => { - tracing::warn!("====================================================================="); - tracing::warn!("WARNING: TRUSTED_PROXY_IPS is set to the default Docker bridge IPs."); - tracing::warn!("Container IPs can change on restart. 
Rate limiting may fail open if these are incorrect."); - tracing::warn!("Please verify these IPs post-deploy or use a more robust mechanism."); - tracing::warn!("====================================================================="); + Ok(ips) => { + let default_ips = ips.split(',').map(|s| s.trim()).filter(|s| !s.is_empty()); + let mut has_private = false; + for ip_str in default_ips { + if let Ok(std::net::IpAddr::V4(v4)) = ip_str.parse::() { + let octets = v4.octets(); + if octets[0] == 10 || (octets[0] == 172 && (16..=31).contains(&octets[1])) { + has_private = true; + break; + } + } + } + + if has_private { + tracing::warn!("====================================================================="); + tracing::warn!("WARNING: TRUSTED_PROXY_IPS contains private (e.g., Docker bridge) IPs."); + tracing::warn!("Container IPs can change on restart. Rate limiting may fail open if these are incorrect."); + tracing::warn!("Please verify these IPs post-deploy or use a more robust mechanism."); + tracing::warn!("====================================================================="); + } } - Ok(_) => {} } } diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index d3918ffc..f3145129 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -323,7 +323,7 @@ pub fn JournalismPage() -> impl IntoView { let preview_text = extract_body_preview(&article.content_html) .unwrap_or_else(|| article.excerpt.clone()); let image = article.images.first().cloned(); - let thumb_src = image.clone().unwrap_or_else(|| "data:image/svg+xml;utf8,Image coming soon".to_string()); + let has_image = image.is_some(); let date = extract_printed_date(&article.content_html) .unwrap_or_else(|| article.display_date.clone()); let date = format_cp_style(&date); @@ -331,8 +331,17 @@ pub fn JournalismPage() -> impl IntoView { view! {
- article thumbnail - {image.is_none().then(|| view! {
"Image coming soon"
})} + {if has_image { + view! { article thumbnail }.into_any() + } else { + view! { + + + "Image coming soon" + + }.into_any() + }} + {(!has_image).then(|| view! {
"Image coming soon"
})}

{date}

From 797a30635cc0097d3373903311243ed9711f20fc Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 09:21:49 -0700 Subject: [PATCH 50/64] feat: Add `192.168.x.x` to trusted proxy IP check, validate `before` query parameters, refactor `ensure_data_dir` and admin password change logic, and improve image scheme logging. --- backend/src/api/admin.rs | 66 ++++++++++++++++++---------------- backend/src/api/public.rs | 6 ++++ backend/src/main.rs | 4 +-- frontend/src/pages/sections.rs | 14 ++++++++ hgen/src/main.rs | 5 ++- scripts/remote_build.sh | 14 +++++--- scripts/setup-dev.sh | 2 +- 7 files changed, 71 insertions(+), 40 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index e6365e69..5e10364c 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -184,21 +184,26 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); - let auth_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - let login_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let password_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config, + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ), }; let me_governor_layer = tower_governor::GovernorLayer { @@ -424,23 +429,26 @@ async fn change_password( )); } - let user_id_res = uuid::Uuid::parse_str(&token_data.claims.sub); + let user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|e| { + tracing::error!("Valid JWT contained invalid UUID string: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Invalid token payload".to_string(), + ) + })?; // Verify current password - let user: Option = match &user_id_res { - Ok(id) => sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(id.to_string()) - .fetch_optional(&pool) - .await - .map_err(|e| { - tracing::error!("Database error fetching user for password change: {}", e); - ( - StatusCode::INTERNAL_SERVER_ERROR, - "Database error".to_string(), - ) - })?, - Err(_) => None, - }; + let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") + .bind(user_id.to_string()) + .fetch_optional(&pool) + .await + .map_err(|e| { + tracing::error!("Database error fetching user for password change: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Database 
error".to_string(), + ) + })?; let (hash_to_verify, is_valid_user) = match user { Some(ref u) => (u.password_hash.as_str(), true), @@ -449,10 +457,6 @@ async fn change_password( let password_match = verify_password(&req.current_password, hash_to_verify); - if user_id_res.is_err() { - return Err((StatusCode::UNAUTHORIZED, "Invalid token".to_string())); - } - if !is_valid_user || !password_match { return Err(( StatusCode::FORBIDDEN, @@ -471,7 +475,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? WHERE id = ?") .bind(new_hash) - .bind(user_id_res.unwrap().to_string()) + .bind(user_id.to_string()) .execute(&pool) .await .map_err(|e| { diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index ede196e0..c0966e24 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -49,6 +49,9 @@ async fn list_articles( let limit = query.limit.unwrap_or(20).min(50); let rows_res = if let Some(before) = query.before { + if chrono::DateTime::parse_from_rfc3339(&before).is_err() { + return Err(axum::http::StatusCode::BAD_REQUEST); + } sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") .bind(before) .bind(limit) @@ -128,6 +131,9 @@ async fn list_blog_posts( let limit = query.limit.unwrap_or(20).min(50); let rows_res = if let Some(before) = query.before { + if chrono::DateTime::parse_from_rfc3339(&before).is_err() { + return Err(axum::http::StatusCode::BAD_REQUEST); + } sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? 
ORDER BY published_at DESC LIMIT ?") .bind(before) .bind(limit) diff --git a/backend/src/main.rs b/backend/src/main.rs index 4aa0686e..d95c1a0c 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -90,7 +90,7 @@ async fn main() -> Result<(), Box> { for ip_str in default_ips { if let Ok(std::net::IpAddr::V4(v4)) = ip_str.parse::() { let octets = v4.octets(); - if octets[0] == 10 || (octets[0] == 172 && (16..=31).contains(&octets[1])) { + if octets[0] == 10 || (octets[0] == 172 && (16..=31).contains(&octets[1])) || (octets[0] == 192 && octets[1] == 168) { has_private = true; break; } @@ -101,7 +101,7 @@ async fn main() -> Result<(), Box> { tracing::warn!("====================================================================="); tracing::warn!("WARNING: TRUSTED_PROXY_IPS contains private (e.g., Docker bridge) IPs."); tracing::warn!("Container IPs can change on restart. Rate limiting may fail open if these are incorrect."); - tracing::warn!("Please verify these IPs post-deploy or use a more robust mechanism."); + tracing::warn!("Please verify these IPs post-deploy or use a more robust mechanism like static IPs (--ip) or docker network inspect."); tracing::warn!("====================================================================="); } } diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index f3145129..0bd98290 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -246,6 +246,20 @@ fn linkify_images(html: &str) -> String { search_pos = abs_open + new_content.len(); continue; + } else { + #[cfg(not(target_arch = "wasm32"))] + tracing::debug!( + "Skipped unsafe image scheme in journalism article: {}", + src_url + ); + #[cfg(target_arch = "wasm32")] + web_sys::console::log_1( + &format!( + "Skipped unsafe image scheme in journalism article: {}", + src_url + ) + .into(), + ); } } search_pos = abs_close; diff --git a/hgen/src/main.rs b/hgen/src/main.rs index 209592de..d72e564a 100644 --- a/hgen/src/main.rs +++ 
b/hgen/src/main.rs @@ -13,7 +13,10 @@ use argon2::{ fn main() { let mut password = String::new(); std::io::stdin().read_line(&mut password).expect("Failed to read password"); - let password = password.strip_suffix("\r\n").or_else(|| password.strip_suffix("\n")).unwrap_or(&password); + let password = password + .strip_suffix("\r\n") + .or_else(|| password.strip_suffix("\n")) + .unwrap_or(&password); let salt = SaltString::generate(&mut OsRng); let params = argon2::Params::new( shared::auth::ARGON2_M_COST, diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index bf55a52d..99103dcd 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -10,6 +10,11 @@ echo "Remote Build Target: $TARGET" export DOCKER_BUILDKIT=1 export COMPOSE_DOCKER_CLI_BUILD=1 +ensure_data_dir() { + echo "Ensuring data directory exists..." + mkdir -p data && chmod 700 data && sudo chown 1000:1000 data +} + if [ ! -f .env ]; then echo "Generating new .env file with defaults..." cat < .env @@ -20,7 +25,7 @@ DATABASE_URL=sqlite:////app/data/sqlite.db ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') # Warning: Ephemeral Docker Bridge IPs change on restart. -# In production, use the Docker network name resolved at startup, or assign fixed IPs with --ip. +# In production, use the Docker network name resolved at startup, or assign fixed IPs with --ip via docker-compose. TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 EOF else @@ -34,21 +39,20 @@ if [ "$TARGET" = "all" ] || [ "$TARGET" = "backend" ]; then --cache-from portfolio-chef:latest \ -t portfolio-chef . - echo "Ensuring data directory exists..." - mkdir -p data && chmod 700 data && sudo chown 1000:1000 data + ensure_data_dir fi if [ "$TARGET" = "all" ]; then echo "Building and starting ALL services with BuildKit caching..." 
sudo DOCKER_BUILDKIT=1 docker compose -f compose.prod.yaml build \ --build-arg BUILDKIT_INLINE_CACHE=1 - mkdir -p data && chmod 700 data && sudo chown 1000:1000 data + ensure_data_dir sudo docker compose -f compose.prod.yaml up -d --remove-orphans elif [ "$TARGET" = "backend" ]; then echo "Building and restarting BACKEND (portfolio) service with caching..." sudo DOCKER_BUILDKIT=1 docker compose -f compose.prod.yaml build \ --build-arg BUILDKIT_INLINE_CACHE=1 portfolio - mkdir -p data && chmod 700 data && sudo chown 1000:1000 data + ensure_data_dir sudo docker compose -f compose.prod.yaml up -d --no-deps portfolio elif [ "$TARGET" = "frontend" ]; then echo "Frontend is part of the backend binary in this setup (SSR)." diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 466bb912..33921be3 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -85,7 +85,7 @@ fi # Fallback to python UUID or kernel uuid if uuidgen missing ADMIN_UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c 'import uuid; print(uuid.uuid4())' 2>/dev/null || { echo "❌ Could not generate a UUID. Please install uuidgen."; exit 1; }) -SAFE_UUID=$(printf '%q' "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') +SAFE_UUID=$(echo "$ADMIN_UUID" | tr -cd 'a-fA-F0-9-') if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$ ]]; then echo "❌ Invalid Admin UUID format generated: $SAFE_UUID" From 5e19df823cda11bd0509ecc3d8f0c5c4057ee19b Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 10:06:05 -0700 Subject: [PATCH 51/64] refactor: Streamline admin login to JSON-only, implement distinct public API rate limits, and refactor database row mapping. 
--- backend/src/api/admin.rs | 53 +++-------- backend/src/api/public.rs | 159 ++++++++++++++------------------- frontend/src/pages/sections.rs | 1 + 3 files changed, 79 insertions(+), 134 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 5e10364c..1587b7b6 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -5,10 +5,8 @@ use argon2::{ use axum::body::to_bytes; use axum::body::Body; use axum::http::{header, Request}; -use axum::response::Html; use axum::response::IntoResponse; use axum::response::Json; -use axum::response::Redirect; use axum::{ extract::State, http::{HeaderMap, StatusCode}, @@ -159,9 +157,9 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor ); } - Ok(peer_ip + peer_ip .map(|ip| ip.to_string()) - .unwrap_or_else(|| "unknown".to_string())) + .ok_or(tower_governor::GovernorError::UnableToExtractKey) } } @@ -237,29 +235,19 @@ async fn login( .get(header::CONTENT_TYPE) .and_then(|v| v.to_str().ok()) .unwrap_or(""); - let accept = parts - .headers - .get(header::ACCEPT) - .and_then(|v| v.to_str().ok()) - .unwrap_or(""); let bytes = to_bytes(body, 16 * 1024) .await .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid request body".to_string()))?; - let req: LoginRequest = if content_type.contains("application/json") { - serde_json::from_slice(&bytes) - .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))? - } else if content_type.contains("application/x-www-form-urlencoded") - || content_type.contains("multipart/form-data") - { - serde_urlencoded::from_bytes(&bytes) - .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid form data".to_string()))? 
- } else { + if !content_type.contains("application/json") { return Err(( StatusCode::UNSUPPORTED_MEDIA_TYPE, "Unsupported content type".to_string(), )); - }; + } + + let req: LoginRequest = serde_json::from_slice(&bytes) + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid JSON".to_string()))?; // Prevent extremely long passwords from exhausting Argon2 CPU time. if req.password.len() > 128 { @@ -293,19 +281,12 @@ async fn login( let is_invalid = !is_valid_user || !password_match; if is_invalid { - if content_type.contains("application/x-www-form-urlencoded") - || content_type.contains("multipart/form-data") - || accept.contains("text/html") - { - return Ok(Redirect::to("/admin/login?error=invalid").into_response()); - } - return Err((StatusCode::UNAUTHORIZED, "Invalid credentials".to_string())); } let exp = (Utc::now() + Duration::hours(24)).timestamp() as usize; let claims = Claims { - sub: user.unwrap().id, + sub: user.expect("is_valid_user guarantees Some").id, exp, }; @@ -322,18 +303,7 @@ async fn login( ) })?; - if content_type.contains("application/x-www-form-urlencoded") - || content_type.contains("multipart/form-data") - || accept.contains("text/html") - { - let html = format!( - r#""#, - token - ); - Ok(Html(html).into_response()) - } else { - Ok(Json(LoginResponse { token }).into_response()) - } + Ok(Json(LoginResponse { token }).into_response()) } async fn me( @@ -420,12 +390,11 @@ async fn change_password( )); } - let char_count = req.new_password.chars().count(); let byte_count = req.new_password.len(); - if char_count < 12 || byte_count > 128 { + if !(12..=128).contains(&byte_count) { return Err(( StatusCode::BAD_REQUEST, - "New password length must be at least 12 characters and no more than 128 bytes (policy limit).".to_string(), + "New password length must be at least 12 bytes and no more than 128 bytes (policy limit).".to_string(), )); } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index c0966e24..6fdc87ea 100644 --- 
a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -9,7 +9,18 @@ pub struct Pagination { pub before: Option, } pub fn router(state: crate::state::AppState) -> Router { - let public_governor_layer = tower_governor::GovernorLayer { + let articles_governor_layer = tower_governor::GovernorLayer { + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::admin::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ), + }; + + let blog_governor_layer = tower_governor::GovernorLayer { config: std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::admin::TrustedProxyIpKeyExtractor) @@ -20,18 +31,15 @@ pub fn router(state: crate::state::AppState) -> Router { ), }; - // NOTE: Cloning `GovernorLayer` clones the underlying `Arc`. - // Both `/api/articles` and `/api/blog` share the exact same rate limit bucket per IP. - // Bursting one endpoint will exhaust the quota for the other. This is intentional. Router::new() .route("/health", get(health_check)) .route( "/api/articles", - get(list_articles).route_layer(public_governor_layer.clone()), + get(list_articles).route_layer(articles_governor_layer), ) .route( "/api/blog", - get(list_blog_posts).route_layer(public_governor_layer), + get(list_blog_posts).route_layer(blog_governor_layer), ) .with_state(state) } @@ -55,29 +63,7 @@ async fn list_articles( sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? 
ORDER BY published_at DESC LIMIT ?") .bind(before) .bind(limit) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let origin_str: String = row.try_get("origin")?; - let origin = match origin_str.as_str() { - "imported" => shared::Origin::Imported, - "synced" => shared::Origin::Synced, - _ => shared::Origin::Local, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(Article { - id, - wp_id: row.try_get("wp_id")?, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - subtitle: row.try_get("subtitle")?, - excerpt: row.try_get("excerpt")?, - content: row.try_get("content")?, - cover_image_url: row.try_get("cover_image_url")?, - author: row.try_get("author")?, - published_at: row.try_get("published_at")?, - origin, - }) - }) + .try_map(map_article_row) .fetch_all(&pool) .await } else { @@ -88,29 +74,7 @@ async fn list_articles( sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? 
OFFSET ?") .bind(limit) .bind(offset) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let origin_str: String = row.try_get("origin")?; - let origin = match origin_str.as_str() { - "imported" => shared::Origin::Imported, - "synced" => shared::Origin::Synced, - _ => shared::Origin::Local, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(Article { - id, - wp_id: row.try_get("wp_id")?, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - subtitle: row.try_get("subtitle")?, - excerpt: row.try_get("excerpt")?, - content: row.try_get("content")?, - cover_image_url: row.try_get("cover_image_url")?, - author: row.try_get("author")?, - published_at: row.try_get("published_at")?, - origin, - }) - }) + .try_map(map_article_row) .fetch_all(&pool) .await }; @@ -137,26 +101,7 @@ async fn list_blog_posts( sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") .bind(before) .bind(limit) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let tags_str: Option = row.try_get("tags")?; - let tags = match tags_str { - Some(s) => match serde_json::from_str(&s) { - Ok(t) => Some(t), - Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), - }, - None => None, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(BlogPost { - id, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - content: row.try_get("content")?, - published_at: row.try_get("published_at")?, - tags, - }) - }) + .try_map(map_blog_post_row) .fetch_all(&pool) .await } else { @@ -167,26 +112,7 @@ async fn list_blog_posts( sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? 
OFFSET ?") .bind(limit) .bind(offset) - .try_map(|row: sqlx::sqlite::SqliteRow| { - let tags_str: Option = row.try_get("tags")?; - let tags = match tags_str { - Some(s) => match serde_json::from_str(&s) { - Ok(t) => Some(t), - Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), - }, - None => None, - }; - let id_str: String = row.try_get("id")?; - let id = id_str.parse::().map_err(|e| sqlx::Error::Decode(Box::new(e)))?; - Ok(BlogPost { - id, - slug: row.try_get("slug")?, - title: row.try_get("title")?, - content: row.try_get("content")?, - published_at: row.try_get("published_at")?, - tags, - }) - }) + .try_map(map_blog_post_row) .fetch_all(&pool) .await }; @@ -199,3 +125,52 @@ async fn list_blog_posts( } } } + +fn map_article_row(row: sqlx::sqlite::SqliteRow) -> Result { + let origin_str: String = row.try_get("origin")?; + let origin = match origin_str.as_str() { + "imported" => shared::Origin::Imported, + "synced" => shared::Origin::Synced, + _ => shared::Origin::Local, + }; + let id_str: String = row.try_get("id")?; + let id = id_str + .parse::() + .map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(Article { + id, + wp_id: row.try_get("wp_id")?, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + subtitle: row.try_get("subtitle")?, + excerpt: row.try_get("excerpt")?, + content: row.try_get("content")?, + cover_image_url: row.try_get("cover_image_url")?, + author: row.try_get("author")?, + published_at: row.try_get("published_at")?, + origin, + }) +} + +fn map_blog_post_row(row: sqlx::sqlite::SqliteRow) -> Result { + let tags_str: Option = row.try_get("tags")?; + let tags = match tags_str { + Some(s) => match serde_json::from_str(&s) { + Ok(t) => Some(t), + Err(e) => return Err(sqlx::Error::Decode(Box::new(e))), + }, + None => None, + }; + let id_str: String = row.try_get("id")?; + let id = id_str + .parse::() + .map_err(|e| sqlx::Error::Decode(Box::new(e)))?; + Ok(BlogPost { + id, + slug: row.try_get("slug")?, + title: row.try_get("title")?, + 
content: row.try_get("content")?, + published_at: row.try_get("published_at")?, + tags, + }) +} diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 0bd98290..87df71c7 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -399,6 +399,7 @@ pub fn JournalismArticlePage() -> impl IntoView { web_sys::console::log_1(&"Checking auth token...".into()); if let Ok(Some(storage)) = web_sys::window().unwrap().local_storage() { if let Ok(Some(t)) = storage.get_item("admin_token") { + #[cfg(debug_assertions)] web_sys::console::log_1(&format!("Found token: {}", t).into()); if !t.is_empty() { _set_token.set(t); From bfe7ab061fa0e15572503c04f0efb25b79d6c0bb Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 10:16:50 -0700 Subject: [PATCH 52/64] Refactor rate limiting key extractor to a common module, add input validation for query parameters and username length, and initialize a dummy hash at startup. --- backend/src/api/admin.rs | 95 +++++---------------------------------- backend/src/api/mod.rs | 83 ++++++++++++++++++++++++++++++++++ backend/src/api/public.rs | 35 ++++++++------- backend/src/main.rs | 1 + scripts/setup-dev.sh | 4 +- 5 files changed, 116 insertions(+), 102 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 1587b7b6..8ee23594 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -19,6 +19,10 @@ use serde::{Deserialize, Serialize}; use sqlx::SqlitePool; use std::sync::OnceLock; +pub fn init_dummy_hash() { + let _ = get_dummy_hash(); +} + fn get_dummy_hash() -> &'static str { static DUMMY_HASH: OnceLock = OnceLock::new(); DUMMY_HASH.get_or_init(|| { @@ -82,87 +86,6 @@ fn hash_password(password: &str) -> Result { .map(|hash| hash.to_string()) } -#[derive(Clone)] -pub struct TrustedProxyIpKeyExtractor; - -impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor { - type Key = String; - - fn extract(&self, req: &Request) -> 
Result { - let peer_ip = req - .extensions() - .get::>() - .map(|ci| ci.0.ip()); - - static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); - let trusted_ips = TRUSTED_PROXY_IPS.get_or_init(|| { - std::env::var("TRUSTED_PROXY_IPS") - .unwrap_or_default() - .split(',') - .filter_map(|s| { - let trimmed = s.trim(); - if trimmed.is_empty() { - return None; - } - match trimmed.parse() { - Ok(ip) => Some(ip), - Err(e) => { - tracing::warn!( - "Invalid IP address in TRUSTED_PROXY_IPS '{}': {}", - trimmed, - e - ); - None - } - } - }) - .collect() - }); - - let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); - - if is_trusted_proxy { - // Priority 1: X-Real-IP - if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { - if let Ok(parsed_ip) = real_ip.trim().parse::() { - return Ok(parsed_ip.to_string()); - } - } - - // Priority 2: X-Forwarded-For - if let Some(forwarded_for) = req - .headers() - .get("X-Forwarded-For") - .and_then(|h| h.to_str().ok()) - { - // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration - // uses `proxy_add_x_forwarded_for`, which appends the connecting peer's IP (the hop right before Nginx) to the right. - // We pick the rightmost IP because that is the most trusted hop added by our reverse proxy, preventing client-side spoofing. - // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer - // will put its own IP rightmost, making all traffic share one rate limit bucket. - if let Some(last_ip) = forwarded_for.split(',').next_back() { - if let Ok(parsed_ip) = last_ip.trim().parse::() { - if Some(parsed_ip) == peer_ip { - tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. 
This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); - } else { - tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); - } - return Ok(parsed_ip.to_string()); - } - } - } - tracing::warn!( - "TRUSTED_PROXY_IPS allowed proxy IP {}, but no valid X-Real-IP or X-Forwarded-For header was found. Rate limiting will apply to the proxy IP.", - peer_ip.unwrap() - ); - } - - peer_ip - .map(|ip| ip.to_string()) - .ok_or(tower_governor::GovernorError::UnableToExtractKey) - } -} - #[inline(never)] fn verify_password(password: &str, password_hash: &str) -> bool { let parsed_hash = match PasswordHash::new(password_hash) { @@ -185,7 +108,7 @@ pub fn router(state: crate::state::AppState) -> Router { let login_governor_layer = tower_governor::GovernorLayer { config: std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) .burst_size(1) .finish() @@ -196,7 +119,7 @@ pub fn router(state: crate::state::AppState) -> Router { let password_governor_layer = tower_governor::GovernorLayer { config: std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) .burst_size(1) .finish() @@ -207,7 +130,7 @@ pub fn router(state: crate::state::AppState) -> Router { let me_governor_layer = tower_governor::GovernorLayer { config: std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(TrustedProxyIpKeyExtractor) + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(5) .burst_size(10) .finish() @@ -254,6 +177,10 @@ async fn login( return 
Err((StatusCode::BAD_REQUEST, "Password too long".to_string())); } + if req.username.len() > 64 { + return Err((StatusCode::BAD_REQUEST, "Username too long".to_string())); + } + let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE username = ?") .bind(&req.username) diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index 84c5652c..a2efa9b6 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -1,8 +1,91 @@ +use axum::http::Request; use axum::Router; +use std::sync::OnceLock; pub mod admin; mod public; +#[derive(Clone)] +pub struct TrustedProxyIpKeyExtractor; + +impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor { + type Key = String; + + fn extract(&self, req: &Request) -> Result { + let peer_ip = req + .extensions() + .get::>() + .map(|ci| ci.0.ip()); + + static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); + let trusted_ips = TRUSTED_PROXY_IPS.get_or_init(|| { + std::env::var("TRUSTED_PROXY_IPS") + .unwrap_or_default() + .split(',') + .filter_map(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + return None; + } + match trimmed.parse() { + Ok(ip) => Some(ip), + Err(e) => { + tracing::warn!( + "Invalid IP address in TRUSTED_PROXY_IPS '{}': {}", + trimmed, + e + ); + None + } + } + }) + .collect() + }); + + let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); + + if is_trusted_proxy { + // Priority 1: X-Real-IP + if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { + if let Ok(parsed_ip) = real_ip.trim().parse::() { + return Ok(parsed_ip.to_string()); + } + } + + // Priority 2: X-Forwarded-For + if let Some(forwarded_for) = req + .headers() + .get("X-Forwarded-For") + .and_then(|h| h.to_str().ok()) + { + // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration + // uses `proxy_add_x_forwarded_for`, which appends the connecting peer's IP (the hop right before Nginx) to the 
right. + // We pick the rightmost IP because that is the most trusted hop added by our reverse proxy, preventing client-side spoofing. + // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer + // will put its own IP rightmost, making all traffic share one rate limit bucket. + if let Some(last_ip) = forwarded_for.split(',').next_back() { + if let Ok(parsed_ip) = last_ip.trim().parse::() { + if Some(parsed_ip) == peer_ip { + tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); + } else { + tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); + } + return Ok(parsed_ip.to_string()); + } + } + } + tracing::warn!( + "TRUSTED_PROXY_IPS allowed proxy IP {}, but no valid X-Real-IP or X-Forwarded-For header was found. 
Rate limiting will apply to the proxy IP.", + peer_ip.unwrap() + ); + } + + peer_ip + .map(|ip| ip.to_string()) + .ok_or(tower_governor::GovernorError::UnableToExtractKey) + } +} + pub fn router(state: crate::state::AppState) -> Router { Router::new() .merge(public::router(state.clone())) diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 6fdc87ea..0cb38715 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -9,26 +9,21 @@ pub struct Pagination { pub before: Option, } pub fn router(state: crate::state::AppState) -> Router { + let common_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ); + let articles_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::admin::TrustedProxyIpKeyExtractor) - .per_second(5) - .burst_size(20) - .finish() - .unwrap(), - ), + config: common_governor_config.clone(), }; let blog_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::admin::TrustedProxyIpKeyExtractor) - .per_second(5) - .burst_size(20) - .finish() - .unwrap(), - ), + config: common_governor_config.clone(), }; Router::new() @@ -56,6 +51,10 @@ async fn list_articles( ) -> Result>, axum::http::StatusCode> { let limit = query.limit.unwrap_or(20).min(50); + if query.before.is_some() && query.offset.is_some() { + return Err(axum::http::StatusCode::BAD_REQUEST); + } + let rows_res = if let Some(before) = query.before { if chrono::DateTime::parse_from_rfc3339(&before).is_err() { return Err(axum::http::StatusCode::BAD_REQUEST); @@ -94,6 +93,10 @@ async fn list_blog_posts( ) -> Result>, axum::http::StatusCode> { let limit = 
query.limit.unwrap_or(20).min(50); + if query.before.is_some() && query.offset.is_some() { + return Err(axum::http::StatusCode::BAD_REQUEST); + } + let rows_res = if let Some(before) = query.before { if chrono::DateTime::parse_from_rfc3339(&before).is_err() { return Err(axum::http::StatusCode::BAD_REQUEST); diff --git a/backend/src/main.rs b/backend/src/main.rs index d95c1a0c..58cc3494 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -37,6 +37,7 @@ async fn main() -> Result<(), Box> { // Initialize JWT Secret early so it panics at startup if missing shared::auth::init_jwt_secret(); + crate::api::admin::init_dummy_hash(); // Improved error handling for DATABASE_URL let database_url = std::env::var("DATABASE_URL") diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 33921be3..8439cb8f 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -93,8 +93,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F fi ESCAPED_HASH="${ADMIN_HASH//\'/\'\'}" -sqlite3 sqlite.db -cmd ".param set @id '$SAFE_UUID'" -cmd ".param set @hash '$ESCAPED_HASH'" \ - "INSERT INTO users (id, username, password_hash) VALUES (@id, 'admin', @hash) ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db \ + "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ESCAPED_HASH') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" From f3d010681e5dae676fee23c27137a479df09e400 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 10:28:26 -0700 Subject: [PATCH 53/64] feat: Harden password verification against timing attacks, expand trusted proxy IP detection to include IPv6, and standardize date query parameter formatting. 
--- backend/src/api/admin.rs | 38 ++++++++++++++++----------------- backend/src/api/public.rs | 45 ++++++++++++++++++++++----------------- backend/src/main.rs | 20 ++++++++++++++--- 3 files changed, 62 insertions(+), 41 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 8ee23594..781a2a01 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -90,7 +90,13 @@ fn hash_password(password: &str) -> Result { fn verify_password(password: &str, password_hash: &str) -> bool { let parsed_hash = match PasswordHash::new(password_hash) { Ok(h) => h, - Err(_) => return false, + Err(_) => { + tracing::error!("Failed to parse password hash!"); + let dummy = get_dummy_hash(); + let parsed_dummy = PasswordHash::new(dummy).unwrap(); + let _ = get_argon2().verify_password(password.as_bytes(), &parsed_dummy); + return false; + } }; get_argon2() .verify_password(password.as_bytes(), &parsed_hash) @@ -105,26 +111,21 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); + let auth_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + let login_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: auth_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ), + config: auth_governor_config.clone(), }; let me_governor_layer = tower_governor::GovernorLayer { @@ -248,7 +249,7 @@ async fn me( .ok_or(StatusCode::UNAUTHORIZED)?; let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256); - let token_data = jsonwebtoken::decode::( + let _token_data = jsonwebtoken::decode::( token, &jsonwebtoken::DecodingKey::from_secret(shared::auth::get_jwt_secret()), &validation, @@ -263,7 +264,6 @@ async fn me( Ok(Json(serde_json::json!({ "authenticated": true, - "user_id": token_data.claims.sub }))) } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 0cb38715..414e9696 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -9,21 +9,26 @@ pub struct Pagination { pub before: Option, } pub fn router(state: crate::state::AppState) -> Router { - let common_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(5) - .burst_size(20) - .finish() - .unwrap(), - ); - let articles_governor_layer = 
tower_governor::GovernorLayer { - config: common_governor_config.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ), }; let blog_governor_layer = tower_governor::GovernorLayer { - config: common_governor_config.clone(), + config: std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ), }; Router::new() @@ -56,11 +61,12 @@ async fn list_articles( } let rows_res = if let Some(before) = query.before { - if chrono::DateTime::parse_from_rfc3339(&before).is_err() { - return Err(axum::http::StatusCode::BAD_REQUEST); - } + let dt = chrono::DateTime::parse_from_rfc3339(&before) + .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? + .to_utc(); + let normalized = dt.format("%Y-%m-%dT%H:%M:%6f").to_string() + "Z"; sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") - .bind(before) + .bind(normalized) .bind(limit) .try_map(map_article_row) .fetch_all(&pool) @@ -98,11 +104,12 @@ async fn list_blog_posts( } let rows_res = if let Some(before) = query.before { - if chrono::DateTime::parse_from_rfc3339(&before).is_err() { - return Err(axum::http::StatusCode::BAD_REQUEST); - } + let dt = chrono::DateTime::parse_from_rfc3339(&before) + .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? + .to_utc(); + let normalized = dt.format("%Y-%m-%dT%H:%M:%6f").to_string() + "Z"; sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? 
ORDER BY published_at DESC LIMIT ?") - .bind(before) + .bind(normalized) .bind(limit) .try_map(map_blog_post_row) .fetch_all(&pool) diff --git a/backend/src/main.rs b/backend/src/main.rs index 58cc3494..86d48c1f 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -89,12 +89,26 @@ async fn main() -> Result<(), Box> { let default_ips = ips.split(',').map(|s| s.trim()).filter(|s| !s.is_empty()); let mut has_private = false; for ip_str in default_ips { - if let Ok(std::net::IpAddr::V4(v4)) = ip_str.parse::() { - let octets = v4.octets(); - if octets[0] == 10 || (octets[0] == 172 && (16..=31).contains(&octets[1])) || (octets[0] == 192 && octets[1] == 168) { + if let Ok(ip) = ip_str.parse::() { + if ip.is_loopback() { has_private = true; break; } + match ip { + std::net::IpAddr::V4(v4) => { + let octets = v4.octets(); + if octets[0] == 10 || (octets[0] == 172 && (16..=31).contains(&octets[1])) || (octets[0] == 192 && octets[1] == 168) { + has_private = true; + break; + } + } + std::net::IpAddr::V6(v6) => { + if (v6.segments()[0] & 0xfe00) == 0xfc00 { + has_private = true; + break; + } + } + } } } From 2b9f73d93b63efabc0ea3afee40d458e63ed0e76 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 10:53:49 -0700 Subject: [PATCH 54/64] Refactor rate limiter configuration and timestamp formatting, improve SQLite string escaping, and refine X-Forwarded-For IP extraction logic. --- backend/src/api/mod.rs | 3 ++- backend/src/api/public.rs | 31 +++++++++++++------------------ scripts/setup-dev.sh | 5 ++--- 3 files changed, 17 insertions(+), 22 deletions(-) diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index a2efa9b6..9dce569e 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -67,10 +67,11 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor if let Ok(parsed_ip) = last_ip.trim().parse::() { if Some(parsed_ip) == peer_ip { tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. 
This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); + // fall through to peer_ip fallback below } else { tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); + return Ok(parsed_ip.to_string()); } - return Ok(parsed_ip.to_string()); } } } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 414e9696..7c59eba2 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -9,26 +9,21 @@ pub struct Pagination { pub before: Option, } pub fn router(state: crate::state::AppState) -> Router { + let public_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(5) + .burst_size(20) + .finish() + .unwrap(), + ); + let articles_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(5) - .burst_size(20) - .finish() - .unwrap(), - ), + config: public_governor_config.clone(), }; let blog_governor_layer = tower_governor::GovernorLayer { - config: std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(5) - .burst_size(20) - .finish() - .unwrap(), - ), + config: public_governor_config, }; Router::new() @@ -64,7 +59,7 @@ async fn list_articles( let dt = chrono::DateTime::parse_from_rfc3339(&before) .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? 
.to_utc(); - let normalized = dt.format("%Y-%m-%dT%H:%M:%6f").to_string() + "Z"; + let normalized = dt.format("%Y-%m-%dT%H:%M:%3fZ").to_string(); sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") .bind(normalized) .bind(limit) @@ -107,7 +102,7 @@ async fn list_blog_posts( let dt = chrono::DateTime::parse_from_rfc3339(&before) .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? .to_utc(); - let normalized = dt.format("%Y-%m-%dT%H:%M:%6f").to_string() + "Z"; + let normalized = dt.format("%Y-%m-%dT%H:%M:%3fZ").to_string(); sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") .bind(normalized) .bind(limit) diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 8439cb8f..91762474 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,9 +92,8 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -ESCAPED_HASH="${ADMIN_HASH//\'/\'\'}" -sqlite3 sqlite.db \ - "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ESCAPED_HASH') ON CONFLICT (username) DO NOTHING;" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db "$(printf "INSERT INTO users (id, username, password_hash) VALUES ('%s', 'admin', '%s') ON CONFLICT (username) DO NOTHING;" \ + "$SAFE_UUID" "$(printf '%s' "$ADMIN_HASH" | sed "s/'/''/g")")" || echo "⚠️ Could not create user (may already exist)" echo "" echo "✅ Setup complete!" From cda3d8f1d052c3cdd8e17ef2a5f80de17f2ff95a Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 11:03:56 -0700 Subject: [PATCH 55/64] feat: Add `updated_at` columns and timestamp normalization migrations, refine backend login rate limits, and enable debug-only frontend logging. 
--- backend/src/api/admin.rs | 15 ++++++++++++--- frontend/src/pages/sections.rs | 3 +++ migrations/20260325000000_add_updated_at.sql | 8 ++++++++ .../20260325000001_normalize_timestamps.sql | 10 ++++++++++ scripts/remote_build.sh | 3 ++- scripts/setup-dev.sh | 9 +++++++-- shared/src/auth.rs | 2 +- 7 files changed, 43 insertions(+), 7 deletions(-) create mode 100644 migrations/20260325000000_add_updated_at.sql create mode 100644 migrations/20260325000001_normalize_timestamps.sql diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 781a2a01..832d7964 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,7 +111,16 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); - let auth_governor_config = std::sync::Arc::new( + let login_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + + let password_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -121,11 +130,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config.clone(), + config: login_governor_config, }; let password_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config.clone(), + config: password_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 87df71c7..51fdfedd 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -396,6 +396,7 @@ pub fn JournalismArticlePage() -> impl IntoView { Effect::new(move || { #[cfg(target_arch = "wasm32")] { + #[cfg(debug_assertions)] web_sys::console::log_1(&"Checking auth token...".into()); if let Ok(Some(storage)) = web_sys::window().unwrap().local_storage() { if let Ok(Some(t)) = storage.get_item("admin_token") { @@ -404,9 +405,11 @@ pub fn JournalismArticlePage() -> impl IntoView { if !t.is_empty() { _set_token.set(t); _set_is_admin.set(true); + #[cfg(debug_assertions)] web_sys::console::log_1(&"Admin mode enabled".into()); } } else { + #[cfg(debug_assertions)] web_sys::console::log_1(&"No token found in localStorage".into()); } } diff --git a/migrations/20260325000000_add_updated_at.sql b/migrations/20260325000000_add_updated_at.sql new file mode 100644 index 00000000..52196831 --- /dev/null +++ b/migrations/20260325000000_add_updated_at.sql @@ -0,0 
+1,8 @@ +-- Add updated_at columns to core entities where they were missing + +ALTER TABLE articles ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); +ALTER TABLE blog_posts ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); +ALTER TABLE creative_works ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); +ALTER TABLE media_items ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); +ALTER TABLE music_tracks ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); +ALTER TABLE projects ADD COLUMN updated_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')); diff --git a/migrations/20260325000001_normalize_timestamps.sql b/migrations/20260325000001_normalize_timestamps.sql new file mode 100644 index 00000000..d1510597 --- /dev/null +++ b/migrations/20260325000001_normalize_timestamps.sql @@ -0,0 +1,10 @@ +-- Normalize datetime precision to match the default SQLite %f format (milliseconds) +-- This ensures consistent precision for cursor-based pagination. + +UPDATE articles +SET published_at = strftime('%Y-%m-%dT%H:%M:%fZ', published_at) +WHERE published_at IS NOT NULL; + +UPDATE blog_posts +SET published_at = strftime('%Y-%m-%dT%H:%M:%fZ', published_at) +WHERE published_at IS NOT NULL; diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 99103dcd..402e719c 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -25,7 +25,8 @@ DATABASE_URL=sqlite:////app/data/sqlite.db ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') # Warning: Ephemeral Docker Bridge IPs change on restart. -# In production, use the Docker network name resolved at startup, or assign fixed IPs with --ip via docker-compose. 
+# Run \`docker network inspect jakewraydev_default\` to find the proxy IP, +# or assign a static IP to the proxy container in compose.prod.yaml. TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 EOF else diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 91762474..e4a25122 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,8 +92,13 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db "$(printf "INSERT INTO users (id, username, password_hash) VALUES ('%s', 'admin', '%s') ON CONFLICT (username) DO NOTHING;" \ - "$SAFE_UUID" "$(printf '%s' "$ADMIN_HASH" | sed "s/'/''/g")")" || echo "⚠️ Could not create user (may already exist)" +sqlite3 sqlite.db < &'static [u8] { panic!("JWT_SECRET environment variable must be set. If this is a frontend/WASM build, the 'ssr' feature may have been incorrectly enabled."); }); if secret.len() < 32 { - panic!("JWT_SECRET must be at least 32 characters long for security."); + panic!("JWT_SECRET must be at least 32 bytes long for security."); } secret.into_bytes() }) From cfae4b3b5ae8610fcde169e421837cf17de5b438 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 11:17:30 -0700 Subject: [PATCH 56/64] feat: Add flexible datetime parsing for blog post dates and refactor admin API rate limiters. --- backend/src/api/admin.rs | 30 +++++++++++------------------- backend/src/api/mod.rs | 4 ++++ backend/src/api/public.rs | 18 ++++++++++++++++-- scripts/setup-dev.sh | 5 +---- 4 files changed, 32 insertions(+), 25 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 832d7964..43ecd94b 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,16 +111,7 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. 
tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. Frequent restarts may bypass burst limits."); - let login_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - - let password_governor_config = std::sync::Arc::new( + let auth_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -130,11 +121,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: login_governor_config, + config: auth_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: password_governor_config, + config: auth_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -334,17 +325,18 @@ async fn change_password( )); } - let user_id = uuid::Uuid::parse_str(&token_data.claims.sub).map_err(|e| { - tracing::error!("Valid JWT contained invalid UUID string: {}", e); - ( + let user_id_str = &token_data.claims.sub; + if uuid::Uuid::parse_str(user_id_str).is_err() { + tracing::error!("Valid JWT contained invalid UUID string: {}", user_id_str); + return Err(( StatusCode::INTERNAL_SERVER_ERROR, "Invalid token payload".to_string(), - ) - })?; + )); + } // Verify current password let user: Option = sqlx::query_as("SELECT id, password_hash FROM users WHERE id = ?") - .bind(user_id.to_string()) + .bind(user_id_str) .fetch_optional(&pool) .await .map_err(|e| { @@ -380,7 +372,7 @@ async fn change_password( sqlx::query("UPDATE users SET password_hash = ? 
WHERE id = ?") .bind(new_hash) - .bind(user_id.to_string()) + .bind(user_id_str) .execute(&pool) .await .map_err(|e| { diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index 9dce569e..2227dce9 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -46,6 +46,10 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor if is_trusted_proxy { // Priority 1: X-Real-IP + // SECURITY NOTE: We unconditionally trust X-Real-IP here because `is_trusted_proxy` + // confirmed this request came from our trusted local reverse proxy. This behavior + // assumes that Nginx is explicitly configured with `proxy_set_header X-Real-IP $remote_addr;` + // to overwrite any potentially forged X-Real-IP header sent by the client. if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { if let Ok(parsed_ip) = real_ip.trim().parse::() { return Ok(parsed_ip.to_string()); diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 7c59eba2..8de1a9f8 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -152,7 +152,7 @@ fn map_article_row(row: sqlx::sqlite::SqliteRow) -> Result content: row.try_get("content")?, cover_image_url: row.try_get("cover_image_url")?, author: row.try_get("author")?, - published_at: row.try_get("published_at")?, + published_at: parse_flexible_datetime(row.try_get("published_at")?)?, origin, }) } @@ -175,7 +175,21 @@ fn map_blog_post_row(row: sqlx::sqlite::SqliteRow) -> Result Result, sqlx::Error> { + chrono::DateTime::parse_from_rfc3339(&dt_str) + .map(|dt| dt.with_timezone(&chrono::Utc)) + .or_else(|_| { + chrono::NaiveDateTime::parse_from_str(&dt_str, "%Y-%m-%d %H:%M:%S") + .map(|ndt| ndt.and_utc()) + }) + .or_else(|_| { + chrono::NaiveDateTime::parse_from_str(&dt_str, "%Y-%m-%d %H:%M:%S%.f") + .map(|ndt| ndt.and_utc()) + }) + .map_err(|e| sqlx::Error::Decode(Box::new(e))) +} diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index e4a25122..7a753f52 
100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,10 +92,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db < Date: Tue, 24 Mar 2026 13:53:03 -0700 Subject: [PATCH 57/64] feat: Implement database triggers for `updated_at` auto-updates and `published_at` format validation, and refactor backend rate limiters and the dev setup script. --- backend/src/api/admin.rs | 21 ++++++--- ...20260325000002_add_updated_at_triggers.sql | 43 +++++++++++++++++++ .../20260325000003_add_published_at_check.sql | 29 +++++++++++++ scripts/setup-dev.sh | 4 +- 4 files changed, 89 insertions(+), 8 deletions(-) create mode 100644 migrations/20260325000002_add_updated_at_triggers.sql create mode 100644 migrations/20260325000003_add_published_at_check.sql diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 43ecd94b..17faa9d5 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,7 +111,16 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended // to pair this with an OS-level fail2ban or log-based alerting to compensate. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); - let auth_governor_config = std::sync::Arc::new( + let login_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + + let password_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -121,11 +130,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config.clone(), + config: login_governor_config, }; let password_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config, + config: password_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -236,7 +245,7 @@ async fn login( async fn me( headers: HeaderMap, - peer_info: Option>, + axum::extract::ConnectInfo(peer_addr): axum::extract::ConnectInfo, ) -> Result, StatusCode> { // Design Note: The /me endpoint validates the JWT cryptographically but does not query the database. // This means a deleted user's JWT remains valid until expiration (24h). 
For a single-admin personal site, @@ -255,9 +264,7 @@ async fn me( &validation, ) .map_err(|e| { - let ip = peer_info - .map(|ci| ci.0.ip().to_string()) - .unwrap_or_else(|| "unknown".to_string()); + let ip = peer_addr.ip().to_string(); tracing::warn!("Invalid token on /me from {}: {}", ip, e); StatusCode::UNAUTHORIZED })?; diff --git a/migrations/20260325000002_add_updated_at_triggers.sql b/migrations/20260325000002_add_updated_at_triggers.sql new file mode 100644 index 00000000..1a8b6a85 --- /dev/null +++ b/migrations/20260325000002_add_updated_at_triggers.sql @@ -0,0 +1,43 @@ +-- Add AFTER UPDATE triggers for core tables to auto-update the updated_at column + +CREATE TRIGGER update_articles_updated_at +AFTER UPDATE ON articles +FOR EACH ROW +BEGIN + UPDATE articles SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; + +CREATE TRIGGER update_blog_posts_updated_at +AFTER UPDATE ON blog_posts +FOR EACH ROW +BEGIN + UPDATE blog_posts SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; + +CREATE TRIGGER update_creative_works_updated_at +AFTER UPDATE ON creative_works +FOR EACH ROW +BEGIN + UPDATE creative_works SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; + +CREATE TRIGGER update_media_items_updated_at +AFTER UPDATE ON media_items +FOR EACH ROW +BEGIN + UPDATE media_items SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; + +CREATE TRIGGER update_music_tracks_updated_at +AFTER UPDATE ON music_tracks +FOR EACH ROW +BEGIN + UPDATE music_tracks SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; + +CREATE TRIGGER update_projects_updated_at +AFTER UPDATE ON projects +FOR EACH ROW +BEGIN + UPDATE projects SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; +END; diff --git a/migrations/20260325000003_add_published_at_check.sql b/migrations/20260325000003_add_published_at_check.sql new file 
mode 100644 index 00000000..f1528cee --- /dev/null +++ b/migrations/20260325000003_add_published_at_check.sql @@ -0,0 +1,29 @@ +-- Add triggers to validate published_at datetime format on insert and update for articles and blog_posts + +CREATE TRIGGER check_articles_published_at_insert +BEFORE INSERT ON articles +WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +BEGIN + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); +END; + +CREATE TRIGGER check_articles_published_at_update +BEFORE UPDATE ON articles +WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +BEGIN + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); +END; + +CREATE TRIGGER check_blog_posts_published_at_insert +BEFORE INSERT ON blog_posts +WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +BEGIN + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); +END; + +CREATE TRIGGER check_blog_posts_published_at_update +BEFORE UPDATE ON blog_posts +WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +BEGIN + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); +END; diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 7a753f52..5ecd0161 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,7 +92,9 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db "INSERT INTO users (id, username, password_hash) VALUES ('$SAFE_UUID', 'admin', '$ADMIN_HASH') ON CONFLICT (username) DO NOTHING;" +sqlite3 sqlite.db < Date: Tue, 24 Mar 2026 14:21:21 -0700 Subject: [PATCH 58/64] refactor: Enhance IP logging for invalid tokens, optimize database update triggers, and improve dev script SQL execution. 
--- GEMINI.md | 2 +- backend/src/api/admin.rs | 28 ++++++++++++++++--- .../20260325000003_add_published_at_check.sql | 4 +-- scripts/setup-dev.sh | 4 +-- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/GEMINI.md b/GEMINI.md index fcdeb0a0..4adcde1a 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -62,7 +62,7 @@ cargo leptos watch 5. **Deployment**: Deployment is managed by `./scripts/deploy.sh`. Changes to infrastructure should be mirrored in both `compose.yaml` and `compose.prod.yaml` where applicable. - - Tick off tasks in the roadmap as they are completed. +- Tick off tasks in the roadmap as they are completed. - Update the roadmap as the project progresses. - Update the plan as the project progresses. - Update the GEMINI.md SPARINGLY as the project progresses. diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 17faa9d5..dfce39e2 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -108,8 +108,8 @@ pub fn router(state: crate::state::AppState) -> Router { // Burst windows completely refresh across restarts. Therefore, the effective rate limiting // window ONLY covers uptime, not absolute calendar time. An attacker who can trigger or observe // restarts could reset their login throttle window. For a low-traffic personal site, this is an - // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is recommended - // to pair this with an OS-level fail2ban or log-based alerting to compensate. + // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is REQUIRED + // to pair this with an OS-level fail2ban or log-based alerting to compensate for the login endpoint. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); let login_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() @@ -264,8 +264,28 @@ async fn me( &validation, ) .map_err(|e| { - let ip = peer_addr.ip().to_string(); - tracing::warn!("Invalid token on /me from {}: {}", ip, e); + let real_ip = headers + .get("X-Real-IP") + .and_then(|h| h.to_str().ok()) + .map(|s| s.trim().to_string()); + + let forwarded_for = headers + .get("X-Forwarded-For") + .and_then(|h| h.to_str().ok()) + .and_then(|s| s.split(',').next_back()) + .map(|s| s.trim().to_string()); + + let client_ip = real_ip + .or(forwarded_for) + .unwrap_or_else(|| "unknown".to_string()); + let proxy_ip = peer_addr.ip().to_string(); + + tracing::warn!( + "Invalid token on /me from client IP {} (via proxy {}): {}", + client_ip, + proxy_ip, + e + ); StatusCode::UNAUTHORIZED })?; diff --git a/migrations/20260325000003_add_published_at_check.sql b/migrations/20260325000003_add_published_at_check.sql index f1528cee..0dd185c5 100644 --- a/migrations/20260325000003_add_published_at_check.sql +++ b/migrations/20260325000003_add_published_at_check.sql @@ -9,7 +9,7 @@ END; CREATE TRIGGER check_articles_published_at_update BEFORE UPDATE ON articles -WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +WHEN NEW.published_at IS NOT NULL AND NEW.published_at IS NOT OLD.published_at AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); END; @@ -23,7 +23,7 @@ END; CREATE TRIGGER check_blog_posts_published_at_update BEFORE UPDATE ON blog_posts -WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) +WHEN NEW.published_at IS NOT NULL AND NEW.published_at IS NOT OLD.published_at AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN SELECT RAISE(ABORT, 
'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); END; diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index 5ecd0161..c2a3aa4c 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,9 +92,7 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -sqlite3 sqlite.db < Date: Tue, 24 Mar 2026 14:39:18 -0700 Subject: [PATCH 59/64] Refactor API initialization for trusted proxies and authentication rate limiters, offload password operations to blocking tasks, sanitize client IP logging, and correct a SQL `published_at` format error message --- backend/src/api/admin.rs | 48 +++++++++------- backend/src/api/mod.rs | 57 +++++++++++-------- backend/src/main.rs | 1 + .../20260325000003_add_published_at_check.sql | 8 +-- 4 files changed, 64 insertions(+), 50 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index dfce39e2..e85247e4 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,16 +111,7 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is REQUIRED // to pair this with an OS-level fail2ban or log-based alerting to compensate for the login endpoint. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); - let login_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - - let password_governor_config = std::sync::Arc::new( + let auth_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -130,11 +121,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: login_governor_config, + config: auth_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: password_governor_config, + config: auth_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -214,7 +205,11 @@ async fn login( } }; - let password_match = verify_password(&req.password, hash_to_verify); + let hash = hash_to_verify.to_string(); + let pw = req.password.clone(); + let password_match = tokio::task::spawn_blocking(move || verify_password(&pw, &hash)) + .await + .unwrap_or(false); let is_invalid = !is_valid_user || !password_match; if is_invalid { @@ -278,11 +273,12 @@ async fn me( let client_ip = real_ip .or(forwarded_for) .unwrap_or_else(|| "unknown".to_string()); + let safe_client_ip = client_ip.replace(['\n', '\r'], " "); let proxy_ip = peer_addr.ip().to_string(); tracing::warn!( "Invalid token on /me from client IP {} (via proxy {}): {}", - client_ip, + safe_client_ip, proxy_ip, e ); @@ -379,7 +375,11 @@ async fn change_password( None => (get_dummy_hash(), false), }; - let password_match = verify_password(&req.current_password, hash_to_verify); + let hash = hash_to_verify.to_string(); + let pw = req.current_password.clone(); + let password_match = tokio::task::spawn_blocking(move || verify_password(&pw, &hash)) + .await + 
.unwrap_or(false); if !is_valid_user || !password_match { return Err(( @@ -389,13 +389,17 @@ async fn change_password( } // Hash new password and update - let new_hash = hash_password(&req.new_password).map_err(|e| { - tracing::error!("Failed to hash new password: {}", e); - ( - StatusCode::INTERNAL_SERVER_ERROR, - "Failed to hash password".to_string(), - ) - })?; + let pw = req.new_password.clone(); + let new_hash = tokio::task::spawn_blocking(move || hash_password(&pw)) + .await + .unwrap_or_else(|_| Err("Task join failed".to_string())) + .map_err(|e| { + tracing::error!("Failed to hash new password: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to hash password".to_string(), + ) + })?; sqlx::query("UPDATE users SET password_hash = ? WHERE id = ?") .bind(new_hash) diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index 2227dce9..aa68a2a3 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -5,6 +5,38 @@ use std::sync::OnceLock; pub mod admin; mod public; +static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); + +pub fn init_trusted_proxies() { + let _ = get_trusted_proxies(); +} + +fn get_trusted_proxies() -> &'static Vec { + TRUSTED_PROXY_IPS.get_or_init(|| { + std::env::var("TRUSTED_PROXY_IPS") + .unwrap_or_default() + .split(',') + .filter_map(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + return None; + } + match trimmed.parse() { + Ok(ip) => Some(ip), + Err(e) => { + tracing::warn!( + "Invalid IP address in TRUSTED_PROXY_IPS '{}': {}", + trimmed, + e + ); + None + } + } + }) + .collect() + }) +} + #[derive(Clone)] pub struct TrustedProxyIpKeyExtractor; @@ -17,30 +49,7 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor .get::>() .map(|ci| ci.0.ip()); - static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); - let trusted_ips = TRUSTED_PROXY_IPS.get_or_init(|| { - std::env::var("TRUSTED_PROXY_IPS") - .unwrap_or_default() - .split(',') - .filter_map(|s| { - let trimmed = 
s.trim(); - if trimmed.is_empty() { - return None; - } - match trimmed.parse() { - Ok(ip) => Some(ip), - Err(e) => { - tracing::warn!( - "Invalid IP address in TRUSTED_PROXY_IPS '{}': {}", - trimmed, - e - ); - None - } - } - }) - .collect() - }); + let trusted_ips = get_trusted_proxies(); let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); diff --git a/backend/src/main.rs b/backend/src/main.rs index 86d48c1f..ff8754e1 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -38,6 +38,7 @@ async fn main() -> Result<(), Box> { // Initialize JWT Secret early so it panics at startup if missing shared::auth::init_jwt_secret(); crate::api::admin::init_dummy_hash(); + crate::api::init_trusted_proxies(); // Improved error handling for DATABASE_URL let database_url = std::env::var("DATABASE_URL") diff --git a/migrations/20260325000003_add_published_at_check.sql b/migrations/20260325000003_add_published_at_check.sql index 0dd185c5..867dbeb0 100644 --- a/migrations/20260325000003_add_published_at_check.sql +++ b/migrations/20260325000003_add_published_at_check.sql @@ -4,26 +4,26 @@ CREATE TRIGGER check_articles_published_at_insert BEFORE INSERT ON articles WHEN NEW.published_at IS NOT NULL AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN - SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%fZ format'); END; CREATE TRIGGER check_articles_published_at_update BEFORE UPDATE ON articles WHEN NEW.published_at IS NOT NULL AND NEW.published_at IS NOT OLD.published_at AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN - SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%fZ format'); END; CREATE TRIGGER check_blog_posts_published_at_insert BEFORE INSERT ON blog_posts WHEN NEW.published_at IS NOT NULL AND NEW.published_at != 
strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN - SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%fZ format'); END; CREATE TRIGGER check_blog_posts_published_at_update BEFORE UPDATE ON blog_posts WHEN NEW.published_at IS NOT NULL AND NEW.published_at IS NOT OLD.published_at AND NEW.published_at != strftime('%Y-%m-%dT%H:%M:%fZ', NEW.published_at) BEGIN - SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%3fZ format'); + SELECT RAISE(ABORT, 'published_at must be in %Y-%m-%dT%H:%M:%fZ format'); END; From 2b5d704437cb53d7fcd3277fe756ae756d073739 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 15:15:15 -0700 Subject: [PATCH 60/64] feat: Enhance database schema with check constraints, refine client IP logging based on trusted proxies, and simplify password trimming and datetime parsing logic. --- backend/src/api/admin.rs | 36 ++++++++++++-------- backend/src/api/mod.rs | 5 +-- backend/src/api/public.rs | 8 ----- hgen/src/main.rs | 5 +-- migrations/20260110000000_initial_schema.sql | 6 ++-- 5 files changed, 28 insertions(+), 32 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index e85247e4..9caa770a 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -259,22 +259,28 @@ async fn me( &validation, ) .map_err(|e| { - let real_ip = headers - .get("X-Real-IP") - .and_then(|h| h.to_str().ok()) - .map(|s| s.trim().to_string()); - - let forwarded_for = headers - .get("X-Forwarded-For") - .and_then(|h| h.to_str().ok()) - .and_then(|s| s.split(',').next_back()) - .map(|s| s.trim().to_string()); - - let client_ip = real_ip - .or(forwarded_for) - .unwrap_or_else(|| "unknown".to_string()); - let safe_client_ip = client_ip.replace(['\n', '\r'], " "); let proxy_ip = peer_addr.ip().to_string(); + let is_trusted_proxy = crate::api::get_trusted_proxies().contains(&peer_addr.ip()); + + let client_ip = if is_trusted_proxy 
{ + let real_ip = headers + .get("X-Real-IP") + .and_then(|h| h.to_str().ok()) + .map(|s| s.trim().to_string()); + + let forwarded_for = headers + .get("X-Forwarded-For") + .and_then(|h| h.to_str().ok()) + .and_then(|s| s.split(',').next_back()) + .map(|s| s.trim().to_string()); + + real_ip + .or(forwarded_for) + .unwrap_or_else(|| "unknown".to_string()) + } else { + proxy_ip.clone() + }; + let safe_client_ip = client_ip.replace(['\n', '\r'], " "); tracing::warn!( "Invalid token on /me from client IP {} (via proxy {}): {}", diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index aa68a2a3..b84af48b 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -8,10 +8,11 @@ mod public; static TRUSTED_PROXY_IPS: OnceLock> = OnceLock::new(); pub fn init_trusted_proxies() { - let _ = get_trusted_proxies(); + let ips = get_trusted_proxies(); + tracing::info!("Initialized TRUSTED_PROXY_IPS: {:?}", ips); } -fn get_trusted_proxies() -> &'static Vec { +pub(crate) fn get_trusted_proxies() -> &'static Vec { TRUSTED_PROXY_IPS.get_or_init(|| { std::env::var("TRUSTED_PROXY_IPS") .unwrap_or_default() diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 8de1a9f8..9bca7e23 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -183,13 +183,5 @@ fn map_blog_post_row(row: sqlx::sqlite::SqliteRow) -> Result Result, sqlx::Error> { chrono::DateTime::parse_from_rfc3339(&dt_str) .map(|dt| dt.with_timezone(&chrono::Utc)) - .or_else(|_| { - chrono::NaiveDateTime::parse_from_str(&dt_str, "%Y-%m-%d %H:%M:%S") - .map(|ndt| ndt.and_utc()) - }) - .or_else(|_| { - chrono::NaiveDateTime::parse_from_str(&dt_str, "%Y-%m-%d %H:%M:%S%.f") - .map(|ndt| ndt.and_utc()) - }) .map_err(|e| sqlx::Error::Decode(Box::new(e))) } diff --git a/hgen/src/main.rs b/hgen/src/main.rs index d72e564a..f025b8fe 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -13,10 +13,7 @@ use argon2::{ fn main() { let mut password = String::new(); 
std::io::stdin().read_line(&mut password).expect("Failed to read password"); - let password = password - .strip_suffix("\r\n") - .or_else(|| password.strip_suffix("\n")) - .unwrap_or(&password); + let password = password.trim_end_matches(['\r', '\n']); let salt = SaltString::generate(&mut OsRng); let params = argon2::Params::new( shared::auth::ARGON2_M_COST, diff --git a/migrations/20260110000000_initial_schema.sql b/migrations/20260110000000_initial_schema.sql index 2b7f5cc6..16887831 100644 --- a/migrations/20260110000000_initial_schema.sql +++ b/migrations/20260110000000_initial_schema.sql @@ -46,7 +46,7 @@ CREATE TABLE creative_works ( id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab', (random() & 3) + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))), slug TEXT NOT NULL UNIQUE, title TEXT NOT NULL, - work_type TEXT NOT NULL, -- 'story', 'novel', 'poetry' + work_type TEXT NOT NULL CHECK(work_type IN ('story', 'novel', 'poetry')), -- 'story', 'novel', 'poetry' synopsis TEXT, content TEXT, -- Full text or chapters (can be JSON if complex) status TEXT NOT NULL DEFAULT 'published', -- 'draft', 'published' @@ -64,8 +64,8 @@ CREATE TABLE media_items ( description TEXT, url TEXT NOT NULL, -- S3 URL or local path thumbnail_url TEXT, - category TEXT NOT NULL, -- 'photography', 'visual_art', 'video', 'j_school' - context TEXT NOT NULL DEFAULT 'personal', -- To distinguish Photojournalism (prof) vs Personal + category TEXT NOT NULL CHECK(category IN ('photography', 'visual_art', 'video', 'j_school')), -- 'photography', 'visual_art', 'video', 'j_school' + context TEXT NOT NULL DEFAULT 'personal' CHECK(context IN ('personal', 'professional')), -- To distinguish Photojournalism (prof) vs Personal taken_at DATETIME, created_at DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ); From 
8e44b2bdb366ed4ae7b8f1b438bce627c09e7362 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 15:44:11 -0700 Subject: [PATCH 61/64] feat: Implement robust client IP extraction for rate limiting, refine `updated_at` database triggers, and improve setup script robustness. --- README.md | 7 +- backend/src/api/admin.rs | 24 +---- backend/src/api/mod.rs | 102 +++++++++--------- ...20260325000002_add_updated_at_triggers.sql | 6 ++ scripts/remote_build.sh | 2 +- scripts/setup-dev.sh | 7 +- 6 files changed, 74 insertions(+), 74 deletions(-) diff --git a/README.md b/README.md index 5fd8c272..c534036a 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ For first-time SSL setup on the server: ### Quick Start with Nix (Recommended) ```bash -direnv allow # Load development environment +direnv allow # Load development environment ./scripts/setup-dev.sh # Setup database cargo leptos watch # Start dev server ``` @@ -51,7 +51,7 @@ cargo leptos watch ## Project Structure - `backend/` - Server-side Rust code -- `frontend/` - Client-side Leptos components +- `frontend/` - Client-side Leptos components - `shared/` - Shared types and utilities - `flake.nix` - Nix development environment - `.envrc` - direnv configuration @@ -61,8 +61,7 @@ cargo leptos watch - [x] **HTTPS/SSL** - Let's Encrypt certificates - [x] **Authentication** - Password-protected admin panel - [x] **Theme** - Modern indigo design -- [ ] **Admin features** - Post creation, sync manager -- [ ] **Content sync** - Import from terracestandard.com +- [ ] **Admin features** - Post creation - [ ] **Media library** - Photo/video management - [x] **Password hashing** - Argon2 implementation - [ ] **Password reset** - Email-based recovery diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 9caa770a..0287e4a3 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -260,26 +260,9 @@ async fn me( ) .map_err(|e| { let proxy_ip = peer_addr.ip().to_string(); - let is_trusted_proxy = 
crate::api::get_trusted_proxies().contains(&peer_addr.ip()); - - let client_ip = if is_trusted_proxy { - let real_ip = headers - .get("X-Real-IP") - .and_then(|h| h.to_str().ok()) - .map(|s| s.trim().to_string()); - - let forwarded_for = headers - .get("X-Forwarded-For") - .and_then(|h| h.to_str().ok()) - .and_then(|s| s.split(',').next_back()) - .map(|s| s.trim().to_string()); - - real_ip - .or(forwarded_for) - .unwrap_or_else(|| "unknown".to_string()) - } else { - proxy_ip.clone() - }; + + let client_ip = crate::api::extract_client_ip(&headers, Some(peer_addr.ip())) + .unwrap_or_else(|| proxy_ip.clone()); let safe_client_ip = client_ip.replace(['\n', '\r'], " "); tracing::warn!( @@ -293,6 +276,7 @@ async fn me( Ok(Json(serde_json::json!({ "authenticated": true, + "sub": _token_data.claims.sub }))) } diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index b84af48b..7fc89f26 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -38,6 +38,53 @@ pub(crate) fn get_trusted_proxies() -> &'static Vec { }) } +pub fn extract_client_ip( + headers: &axum::http::HeaderMap, + peer_ip: Option, +) -> Option { + let trusted_ips = get_trusted_proxies(); + let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); + + if is_trusted_proxy { + // Priority 1: X-Real-IP + // SECURITY NOTE: We unconditionally trust X-Real-IP here because `is_trusted_proxy` + // confirmed this request came from our trusted local reverse proxy. This behavior + // assumes that Nginx is explicitly configured with `proxy_set_header X-Real-IP $remote_addr;` + // to overwrite any potentially forged X-Real-IP header sent by the client. 
+ if let Some(real_ip) = headers.get("X-Real-IP").and_then(|h| h.to_str().ok()) { + if let Ok(parsed_ip) = real_ip.trim().parse::() { + return Some(parsed_ip.to_string()); + } + } + + // Priority 2: X-Forwarded-For + if let Some(forwarded_for) = headers.get("X-Forwarded-For").and_then(|h| h.to_str().ok()) { + // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration + // uses `proxy_add_x_forwarded_for`, which appends the connecting peer's IP (the hop right before Nginx) to the right. + // We pick the rightmost IP because that is the most trusted hop added by our reverse proxy, preventing client-side spoofing. + // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer + // will put its own IP rightmost, making all traffic share one rate limit bucket. + if let Some(last_ip) = forwarded_for.split(',').next_back() { + if let Ok(parsed_ip) = last_ip.trim().parse::() { + if Some(parsed_ip) == peer_ip { + tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); + // fall through to peer_ip fallback below + } else { + tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); + return Some(parsed_ip.to_string()); + } + } + } + } + tracing::warn!( + "TRUSTED_PROXY_IPS allowed proxy IP {}, but no valid X-Real-IP or X-Forwarded-For header was found. 
Rate limiting will apply to the proxy IP.", + peer_ip.unwrap() + ); + } + + peer_ip.map(|ip| ip.to_string()) +} + #[derive(Clone)] pub struct TrustedProxyIpKeyExtractor; @@ -45,58 +92,17 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor type Key = String; fn extract(&self, req: &Request) -> Result { - let peer_ip = req + let connect_info = req .extensions() - .get::>() - .map(|ci| ci.0.ip()); - - let trusted_ips = get_trusted_proxies(); - - let is_trusted_proxy = peer_ip.is_some_and(|ip| trusted_ips.contains(&ip)); - - if is_trusted_proxy { - // Priority 1: X-Real-IP - // SECURITY NOTE: We unconditionally trust X-Real-IP here because `is_trusted_proxy` - // confirmed this request came from our trusted local reverse proxy. This behavior - // assumes that Nginx is explicitly configured with `proxy_set_header X-Real-IP $remote_addr;` - // to overwrite any potentially forged X-Real-IP header sent by the client. - if let Some(real_ip) = req.headers().get("X-Real-IP").and_then(|h| h.to_str().ok()) { - if let Ok(parsed_ip) = real_ip.trim().parse::() { - return Ok(parsed_ip.to_string()); - } - } + .get::>(); - // Priority 2: X-Forwarded-For - if let Some(forwarded_for) = req - .headers() - .get("X-Forwarded-For") - .and_then(|h| h.to_str().ok()) - { - // We pick the rightmost IP (next_back) under the exact assumption that the trusted Nginx configuration - // uses `proxy_add_x_forwarded_for`, which appends the connecting peer's IP (the hop right before Nginx) to the right. - // We pick the rightmost IP because that is the most trusted hop added by our reverse proxy, preventing client-side spoofing. - // NOTE: This assumes Nginx is the ONLY intermediate proxy. Any CDN or external load balancer - // will put its own IP rightmost, making all traffic share one rate limit bucket. 
- if let Some(last_ip) = forwarded_for.split(',').next_back() { - if let Ok(parsed_ip) = last_ip.trim().parse::() { - if Some(parsed_ip) == peer_ip { - tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); - // fall through to peer_ip fallback below - } else { - tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. Cloudflare) may cause all clients to share this IP.", parsed_ip); - return Ok(parsed_ip.to_string()); - } - } - } - } - tracing::warn!( - "TRUSTED_PROXY_IPS allowed proxy IP {}, but no valid X-Real-IP or X-Forwarded-For header was found. Rate limiting will apply to the proxy IP.", - peer_ip.unwrap() - ); + if connect_info.is_none() { + tracing::error!("CRITICAL: ConnectInfo is missing from request extensions! This should never happen because `into_make_service_with_connect_info` is used in main.rs. 
Rate limiting will fail closed and return 500 errors."); } - peer_ip - .map(|ip| ip.to_string()) + let peer_ip = connect_info.map(|ci| ci.0.ip()); + + extract_client_ip(req.headers(), peer_ip) .ok_or(tower_governor::GovernorError::UnableToExtractKey) } } diff --git a/migrations/20260325000002_add_updated_at_triggers.sql b/migrations/20260325000002_add_updated_at_triggers.sql index 1a8b6a85..14b4e30c 100644 --- a/migrations/20260325000002_add_updated_at_triggers.sql +++ b/migrations/20260325000002_add_updated_at_triggers.sql @@ -3,6 +3,7 @@ CREATE TRIGGER update_articles_updated_at AFTER UPDATE ON articles FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE articles SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; @@ -10,6 +11,7 @@ END; CREATE TRIGGER update_blog_posts_updated_at AFTER UPDATE ON blog_posts FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE blog_posts SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; @@ -17,6 +19,7 @@ END; CREATE TRIGGER update_creative_works_updated_at AFTER UPDATE ON creative_works FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE creative_works SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; @@ -24,6 +27,7 @@ END; CREATE TRIGGER update_media_items_updated_at AFTER UPDATE ON media_items FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE media_items SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; @@ -31,6 +35,7 @@ END; CREATE TRIGGER update_music_tracks_updated_at AFTER UPDATE ON music_tracks FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE music_tracks SET updated_at = (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; @@ -38,6 +43,7 @@ END; CREATE TRIGGER update_projects_updated_at AFTER UPDATE ON projects FOR EACH ROW +WHEN NEW.updated_at IS OLD.updated_at BEGIN UPDATE projects SET updated_at = 
(strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) WHERE id = NEW.id; END; diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 402e719c..5e4329a2 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -27,7 +27,7 @@ JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') # Warning: Ephemeral Docker Bridge IPs change on restart. # Run \`docker network inspect jakewraydev_default\` to find the proxy IP, # or assign a static IP to the proxy container in compose.prod.yaml. -TRUSTED_PROXY_IPS=172.18.0.2,172.18.0.3 +TRUSTED_PROXY_IPS= # REQUIRED: set to your Nginx container IP (docker network inspect ...) EOF else echo "Using existing .env file." diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index c2a3aa4c..a808dbab 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -92,7 +92,12 @@ if ! [[ "$SAFE_UUID" =~ ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F exit 1 fi -printf "INSERT INTO users (id, username, password_hash) VALUES ('%s', 'admin', '%s') ON CONFLICT (username) DO NOTHING;\n" "$SAFE_UUID" "$ADMIN_HASH" | sqlite3 sqlite.db +ESCAPED_HASH="${ADMIN_HASH//\'/\'\'}" +sqlite3 sqlite.db < Date: Tue, 24 Mar 2026 16:28:27 -0700 Subject: [PATCH 62/64] refactor: Enhance API error responses with descriptive messages, refine admin rate limiting, ensure safe image tag rendering, and clarify `TRUSTED_PROXY_IPS` documentation. --- backend/src/api/admin.rs | 18 ++++++++---- backend/src/api/public.rs | 52 +++++++++++++++++++++++++++------- frontend/src/pages/sections.rs | 4 ++- scripts/remote_build.sh | 5 ++-- 4 files changed, 60 insertions(+), 19 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 0287e4a3..5384ad8e 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,7 +111,16 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. 
It is REQUIRED // to pair this with an OS-level fail2ban or log-based alerting to compensate for the login endpoint. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. Frequent restarts may bypass burst limits."); - let auth_governor_config = std::sync::Arc::new( + let login_governor_config = std::sync::Arc::new( + tower_governor::governor::GovernorConfigBuilder::default() + .key_extractor(crate::api::TrustedProxyIpKeyExtractor) + .per_second(1) + .burst_size(1) + .finish() + .unwrap(), + ); + + let password_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -121,11 +130,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config.clone(), + config: login_governor_config, }; let password_governor_layer = tower_governor::GovernorLayer { - config: auth_governor_config, + config: password_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { @@ -275,8 +284,7 @@ async fn me( })?; Ok(Json(serde_json::json!({ - "authenticated": true, - "sub": _token_data.claims.sub + "authenticated": true }))) } diff --git a/backend/src/api/public.rs b/backend/src/api/public.rs index 9bca7e23..ea22e94c 100644 --- a/backend/src/api/public.rs +++ b/backend/src/api/public.rs @@ -48,16 +48,24 @@ use sqlx::Row; async fn list_articles( State(pool): State, Query(query): Query, -) -> Result>, axum::http::StatusCode> { +) -> Result>, (axum::http::StatusCode, String)> { let limit = query.limit.unwrap_or(20).min(50); if query.before.is_some() && query.offset.is_some() { - return Err(axum::http::StatusCode::BAD_REQUEST); + return Err(( + axum::http::StatusCode::BAD_REQUEST, + "Cannot use 'before' and 'offset' together".to_string(), + )); } let rows_res = if let Some(before) = query.before { let dt = 
chrono::DateTime::parse_from_rfc3339(&before) - .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? + .map_err(|_| { + ( + axum::http::StatusCode::BAD_REQUEST, + "Invalid 'before' date format".to_string(), + ) + })? .to_utc(); let normalized = dt.format("%Y-%m-%dT%H:%M:%3fZ").to_string(); sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") @@ -69,7 +77,10 @@ async fn list_articles( } else { let offset = query.offset.unwrap_or(0); if offset > 10_000 { - return Err(axum::http::StatusCode::BAD_REQUEST); + return Err(( + axum::http::StatusCode::BAD_REQUEST, + "Offset too large".to_string(), + )); } sqlx::query("SELECT id, wp_id, slug, title, subtitle, excerpt, content, cover_image_url, author, published_at, origin FROM articles ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) @@ -83,7 +94,10 @@ async fn list_articles( Ok(articles) => Ok(Json(articles)), Err(e) => { tracing::error!("Failed to fetch articles: {}", e); - Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR) + Err(( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database error".to_string(), + )) } } } @@ -91,16 +105,24 @@ async fn list_articles( async fn list_blog_posts( State(pool): State, Query(query): Query, -) -> Result>, axum::http::StatusCode> { +) -> Result>, (axum::http::StatusCode, String)> { let limit = query.limit.unwrap_or(20).min(50); if query.before.is_some() && query.offset.is_some() { - return Err(axum::http::StatusCode::BAD_REQUEST); + return Err(( + axum::http::StatusCode::BAD_REQUEST, + "Cannot use 'before' and 'offset' together".to_string(), + )); } let rows_res = if let Some(before) = query.before { let dt = chrono::DateTime::parse_from_rfc3339(&before) - .map_err(|_| axum::http::StatusCode::BAD_REQUEST)? + .map_err(|_| { + ( + axum::http::StatusCode::BAD_REQUEST, + "Invalid 'before' date format".to_string(), + ) + })? 
.to_utc(); let normalized = dt.format("%Y-%m-%dT%H:%M:%3fZ").to_string(); sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts WHERE published_at < ? ORDER BY published_at DESC LIMIT ?") @@ -112,7 +134,10 @@ async fn list_blog_posts( } else { let offset = query.offset.unwrap_or(0); if offset > 10_000 { - return Err(axum::http::StatusCode::BAD_REQUEST); + return Err(( + axum::http::StatusCode::BAD_REQUEST, + "Offset too large".to_string(), + )); } sqlx::query("SELECT id, slug, title, content, published_at, tags FROM blog_posts ORDER BY published_at DESC LIMIT ? OFFSET ?") .bind(limit) @@ -126,7 +151,10 @@ async fn list_blog_posts( Ok(posts) => Ok(Json(posts)), Err(e) => { tracing::error!("Failed to fetch blog posts: {}", e); - Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR) + Err(( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database error".to_string(), + )) } } } @@ -183,5 +211,9 @@ fn map_blog_post_row(row: sqlx::sqlite::SqliteRow) -> Result Result, sqlx::Error> { chrono::DateTime::parse_from_rfc3339(&dt_str) .map(|dt| dt.with_timezone(&chrono::Utc)) + .or_else(|_| { + chrono::NaiveDateTime::parse_from_str(&dt_str, "%Y-%m-%d %H:%M:%S") + .map(|ndt| ndt.and_utc()) + }) .map_err(|e| sqlx::Error::Decode(Box::new(e))) } diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index 51fdfedd..ba79906e 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -239,9 +239,11 @@ fn linkify_images(html: &str) -> String { safe_url ); let wrapper_end = "
"; + let safe_img_tag = + format!("\"Article", safe_url); // Replace strict range - let new_content = format!("{}{}{}", wrapper_start, img_tag, wrapper_end); + let new_content = format!("{}{}{}", wrapper_start, safe_img_tag, wrapper_end); out.replace_range(abs_open..abs_close, &new_content); search_pos = abs_open + new_content.len(); diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 5e4329a2..1b167488 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -25,9 +25,8 @@ DATABASE_URL=sqlite:////app/data/sqlite.db ENVIRONMENT=production JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') # Warning: Ephemeral Docker Bridge IPs change on restart. -# Run \`docker network inspect jakewraydev_default\` to find the proxy IP, -# or assign a static IP to the proxy container in compose.prod.yaml. -TRUSTED_PROXY_IPS= # REQUIRED: set to your Nginx container IP (docker network inspect ...) +# Run `docker network inspect jakewraydev_default` to find the proxy IP, +# and manually add TRUSTED_PROXY_IPS= to this file if using rate limiting. EOF else echo "Using existing .env file." From 3b808c3bc9b4272c95fc3ea0fa53e940ff99ba6d Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 16:49:57 -0700 Subject: [PATCH 63/64] feat: Add `rel=noopener noreferrer` to links, improve IP extraction robustness, set `.env` file permissions, and refine password input. 
--- backend/src/api/admin.rs | 9 ++++++--- backend/src/api/mod.rs | 3 +-- frontend/src/pages/sections.rs | 4 ++-- hgen/src/main.rs | 2 +- scripts/remote_build.sh | 1 + 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 5384ad8e..887a639f 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -249,7 +249,7 @@ async fn login( async fn me( headers: HeaderMap, - axum::extract::ConnectInfo(peer_addr): axum::extract::ConnectInfo, + connect_info: Option>, ) -> Result, StatusCode> { // Design Note: The /me endpoint validates the JWT cryptographically but does not query the database. // This means a deleted user's JWT remains valid until expiration (24h). For a single-admin personal site, @@ -268,9 +268,12 @@ async fn me( &validation, ) .map_err(|e| { - let proxy_ip = peer_addr.ip().to_string(); + let proxy_ip = connect_info + .as_ref() + .map(|ci| ci.0.ip().to_string()) + .unwrap_or_else(|| "unknown".to_string()); - let client_ip = crate::api::extract_client_ip(&headers, Some(peer_addr.ip())) + let client_ip = crate::api::extract_client_ip(&headers, connect_info.map(|ci| ci.0.ip())) .unwrap_or_else(|| proxy_ip.clone()); let safe_client_ip = client_ip.replace(['\n', '\r'], " "); diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index 7fc89f26..e2f68084 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -102,8 +102,7 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor let peer_ip = connect_info.map(|ci| ci.0.ip()); - extract_client_ip(req.headers(), peer_ip) - .ok_or(tower_governor::GovernorError::UnableToExtractKey) + Ok(extract_client_ip(req.headers(), peer_ip).unwrap_or_else(|| "0.0.0.0".to_string())) } } diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index ba79906e..b2e8ea3d 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -235,7 +235,7 @@ fn linkify_images(html: 
&str) -> String { .replace("<", "<") .replace(">", ">"); let wrapper_start = format!( - "", + "", safe_url ); let wrapper_end = ""; @@ -357,7 +357,7 @@ pub fn JournalismPage() -> impl IntoView { }.into_any() }} - {(!has_image).then(|| view! {
"Image coming soon"
})} + // Removed duplicate placeholder div

{date}

diff --git a/hgen/src/main.rs b/hgen/src/main.rs index f025b8fe..9b70aee0 100644 --- a/hgen/src/main.rs +++ b/hgen/src/main.rs @@ -12,7 +12,7 @@ use argon2::{ }; fn main() { let mut password = String::new(); - std::io::stdin().read_line(&mut password).expect("Failed to read password"); + std::io::Read::read_to_string(&mut std::io::stdin(), &mut password).expect("Failed to read password"); let password = password.trim_end_matches(['\r', '\n']); let salt = SaltString::generate(&mut OsRng); let params = argon2::Params::new( diff --git a/scripts/remote_build.sh b/scripts/remote_build.sh index 1b167488..4dda0826 100644 --- a/scripts/remote_build.sh +++ b/scripts/remote_build.sh @@ -28,6 +28,7 @@ JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n') # Run `docker network inspect jakewraydev_default` to find the proxy IP, # and manually add TRUSTED_PROXY_IPS= to this file if using rate limiting. EOF +chmod 600 .env else echo "Using existing .env file." fi From 0bccbc08e2cb25555c5e040c452f525beb78c3c6 Mon Sep 17 00:00:00 2001 From: Jake Wray Date: Tue, 24 Mar 2026 17:03:59 -0700 Subject: [PATCH 64/64] feat: Improve rate limiter client IP extraction by returning an error for unreliable IPs and refactor shared governor configuration. --- backend/src/api/admin.rs | 15 +++------------ backend/src/api/mod.rs | 6 ++++-- frontend/src/pages/sections.rs | 5 ++--- scripts/setup-dev.sh | 2 +- 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/backend/src/api/admin.rs b/backend/src/api/admin.rs index 887a639f..7b7d9648 100644 --- a/backend/src/api/admin.rs +++ b/backend/src/api/admin.rs @@ -111,16 +111,7 @@ pub fn router(state: crate::state::AppState) -> Router { // acceptable trade-off to avoid the complexity of a distributed rate limiter like Redis. It is REQUIRED // to pair this with an OS-level fail2ban or log-based alerting to compensate for the login endpoint. tracing::info!("Initializing rate limiters. Warning: In-memory rate limiter state resets on restart. 
Frequent restarts may bypass burst limits."); - let login_governor_config = std::sync::Arc::new( - tower_governor::governor::GovernorConfigBuilder::default() - .key_extractor(crate::api::TrustedProxyIpKeyExtractor) - .per_second(1) - .burst_size(1) - .finish() - .unwrap(), - ); - - let password_governor_config = std::sync::Arc::new( + let shared_auth_governor_config = std::sync::Arc::new( tower_governor::governor::GovernorConfigBuilder::default() .key_extractor(crate::api::TrustedProxyIpKeyExtractor) .per_second(1) @@ -130,11 +121,11 @@ pub fn router(state: crate::state::AppState) -> Router { ); let login_governor_layer = tower_governor::GovernorLayer { - config: login_governor_config, + config: shared_auth_governor_config.clone(), }; let password_governor_layer = tower_governor::GovernorLayer { - config: password_governor_config, + config: shared_auth_governor_config, }; let me_governor_layer = tower_governor::GovernorLayer { diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs index e2f68084..32210974 100644 --- a/backend/src/api/mod.rs +++ b/backend/src/api/mod.rs @@ -68,7 +68,7 @@ pub fn extract_client_ip( if let Ok(parsed_ip) = last_ip.trim().parse::() { if Some(parsed_ip) == peer_ip { tracing::warn!("X-Forwarded-For rightmost IP {} matches the proxy peer IP. This usually indicates a CDN or external load balancer is stripping or improperly appending headers, collapsing all clients into one rate-limit bucket.", parsed_ip); - // fall through to peer_ip fallback below + return None; } else { tracing::debug!("Extracted client IP {} from X-Forwarded-For rightmost entry. Multi-hop proxies (e.g. 
Cloudflare) may cause all clients to share this IP.", parsed_ip); return Some(parsed_ip.to_string()); @@ -102,7 +102,9 @@ impl tower_governor::key_extractor::KeyExtractor for TrustedProxyIpKeyExtractor let peer_ip = connect_info.map(|ci| ci.0.ip()); - Ok(extract_client_ip(req.headers(), peer_ip).unwrap_or_else(|| "0.0.0.0".to_string())) + let key = extract_client_ip(req.headers(), peer_ip) + .ok_or(tower_governor::GovernorError::UnableToExtractKey)?; + Ok(key) } } diff --git a/frontend/src/pages/sections.rs b/frontend/src/pages/sections.rs index b2e8ea3d..c48c9093 100644 --- a/frontend/src/pages/sections.rs +++ b/frontend/src/pages/sections.rs @@ -339,7 +339,6 @@ pub fn JournalismPage() -> impl IntoView { let preview_text = extract_body_preview(&article.content_html) .unwrap_or_else(|| article.excerpt.clone()); let image = article.images.first().cloned(); - let has_image = image.is_some(); let date = extract_printed_date(&article.content_html) .unwrap_or_else(|| article.display_date.clone()); let date = format_cp_style(&date); @@ -347,8 +346,8 @@ pub fn JournalismPage() -> impl IntoView { view! {
- {if has_image { - view! { article thumbnail }.into_any() + {if let Some(ref img) = image { + view! { article thumbnail }.into_any() } else { view! { diff --git a/scripts/setup-dev.sh b/scripts/setup-dev.sh index a808dbab..929a02ef 100755 --- a/scripts/setup-dev.sh +++ b/scripts/setup-dev.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Local development setup script set -e