From 7380453db9ea19fc2f2fc595f8b5b02a50c1b7d4 Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Mon, 13 Apr 2026 15:08:34 -0700 Subject: [PATCH 1/6] fix: don't overwrite tunnel status on toggle Remove the optimistic upsert_tunnel call after set_enabled_active. The API response captures accepted/programmed at the moment of the call, which may be transiently false while EG reconciles the EPP update. This caused a spurious spinner on the tunnel card. bump_tunnel_refresh triggers an immediate re-poll which returns the correct status without stomping on the cache. --- ui/src/views/proxies_list.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ui/src/views/proxies_list.rs b/ui/src/views/proxies_list.rs index 7eb3c05..79c0ce7 100644 --- a/ui/src/views/proxies_list.rs +++ b/ui/src/views/proxies_list.rs @@ -362,7 +362,7 @@ pub fn TunnelCard( let state = state.clone(); let tunnel_id = tunnel_id_for_toggle.clone(); async move { - let updated = state + state .tunnel_service() .set_enabled_active(&tunnel_id, next_enabled) .await?; @@ -374,7 +374,6 @@ pub fn TunnelCard( .await; } } - state.upsert_tunnel(updated); state.bump_tunnel_refresh(); n0_error::Ok(()) } From 8761c9bfd36c73be9519734b87dbe702ed98d5d4 Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Mon, 13 Apr 2026 20:30:36 -0700 Subject: [PATCH 2/6] fix: remove stale proxy state on tunnel recreate When a tunnel is deleted and recreated with the same host:port but a new resource ID, the old proxy state entry lingers in the ListenNode. This causes tcp_proxy_exists to return true even when the new tunnel is disabled, allowing traffic through when it shouldn't. After syncing current tunnel states, scan for proxy entries whose host:port matches a current tunnel but whose ID is not in the current set, and remove them. Scoped to same-endpoint matches to avoid touching entries belonging to other projects. 
--- lib/src/tunnels.rs | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/lib/src/tunnels.rs b/lib/src/tunnels.rs index 209bcfb..11e5b4c 100644 --- a/lib/src/tunnels.rs +++ b/lib/src/tunnels.rs @@ -249,6 +249,10 @@ impl TunnelService { }); } if !self.publish_tickets { + let current_ids: std::collections::HashSet<&str> = + tunnels.iter().map(|t| t.id.as_str()).collect(); + + // Sync state for each tunnel returned by the server. for tunnel in &tunnels { if let Ok(proxy_state) = proxy_state_from_summary( &tunnel.id, @@ -260,6 +264,38 @@ impl TunnelService { warn!(tunnel_id = %tunnel.id, "Failed to store proxy state: {err:#}"); } } + + // Remove stale local entries that share host:port with a current tunnel + // but have a different resource_id. These accumulate when a tunnel is + // deleted and recreated with the same endpoint (new ID). Without this, + // the stale enabled entry causes tcp_proxy_exists to return true even + // when the current tunnel is disabled, allowing traffic through. + // + // Scoped to same-endpoint matches so we don't touch entries belonging + // to other projects with different endpoints. 
+ for tunnel in &tunnels { + let Ok(data) = + TcpProxyData::from_host_port_str(&strip_scheme(&tunnel.endpoint)) + else { + continue; + }; + let stale_ids: Vec<String> = self + .listen + .proxies() + .into_iter() + .filter(|p| { + !current_ids.contains(p.id()) + && p.info.service().host == data.host + && p.info.service().port == data.port + }) + .map(|p| p.id().to_string()) + .collect(); + for id in stale_ids { + if let Err(err) = self.listen.remove_proxy_state(&id).await { + warn!(tunnel_id = %id, "Failed to remove stale proxy state: {err:#}"); + } + } + } } Ok(tunnels) From ee2f13ba546b5f23134f11c7cab95106beb55c55 Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Mon, 13 Apr 2026 20:34:10 -0700 Subject: [PATCH 3/6] chore: apply cargo fmt --- lib/src/tunnels.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/src/tunnels.rs b/lib/src/tunnels.rs index 11e5b4c..bac25a6 100644 --- a/lib/src/tunnels.rs +++ b/lib/src/tunnels.rs @@ -274,8 +274,7 @@ impl TunnelService { // Scoped to same-endpoint matches so we don't touch entries belonging // to other projects with different endpoints. for tunnel in &tunnels { - let Ok(data) = - TcpProxyData::from_host_port_str(&strip_scheme(&tunnel.endpoint)) + let Ok(data) = TcpProxyData::from_host_port_str(&strip_scheme(&tunnel.endpoint)) else { continue; }; From 30b2ae736292743843eb0abac4d3971a72316b99 Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Fri, 17 Apr 2026 09:13:36 -0700 Subject: [PATCH 4/6] fix: remove n0des diagnostics, net diagnostics now via iroh-services N0DES_API_SECRET / BUILD_N0DES_API_SECRET is superseded by IROH_SERVICES_API_KEY / BUILD_IROH_SERVICES_API_KEY wired up in diagnostics.rs. Remove n0des_api_secret_from_env, build_n0des_client_opt, build_n0des_client, and the _n0des fields from ListenNode/ConnectNode. 
Co-Authored-By: Claude Sonnet 4.6 --- lib/src/node.rs | 87 +++++-------------------------------------------- 1 file changed, 8 insertions(+), 79 deletions(-) diff --git a/lib/src/node.rs b/lib/src/node.rs index ed80592..eb27641 100644 --- a/lib/src/node.rs +++ b/lib/src/node.rs @@ -17,8 +17,8 @@ use iroh_proxy_utils::{ }; use iroh_relay::dns::{DnsProtocol, DnsResolver}; use iroh_relay::{RelayConfig, RelayMap}; -use iroh_services::{ApiSecret, CLIENT_HOST_ALPN}; -use n0_error::{Result, StackResultExt, StdResultExt}; +use iroh_services::CLIENT_HOST_ALPN; +use n0_error::{Result, StdResultExt}; use tokio::{ net::TcpListener, sync::futures::Notified, @@ -55,25 +55,15 @@ pub struct ListenNode { state: StateWrapper, repo: Repo, metrics: Arc, - _n0des: Option>, _diagnostics: Option, } impl ListenNode { - pub async fn new(repo: Repo) -> Result { - let n0des_api_secret = n0des_api_secret_from_env()?; - Self::with_n0des_api_secret(repo, n0des_api_secret).await - } - #[instrument("listen-node", skip_all)] - pub async fn with_n0des_api_secret( - repo: Repo, - n0des_api_secret: Option, - ) -> Result { + pub async fn new(repo: Repo) -> Result { let config = repo.config().await?; let secret_key = repo.listen_key().await?; let endpoint = build_endpoint(secret_key, &config).await?; - let n0des = build_n0des_client_opt(&endpoint, n0des_api_secret).await; let state = repo.load_state().await?; let upstream_proxy = UpstreamProxy::new(state.clone())?; @@ -87,15 +77,13 @@ impl ListenNode { .accept(CLIENT_HOST_ALPN, host) .spawn(); - let this = Self { + return Ok(Self { repo, router, state, metrics, - _n0des: n0des, _diagnostics: Some(handle), - }; - return Ok(this); + }); } Ok(None) => None, Err(err) => { @@ -108,15 +96,13 @@ impl ListenNode { .accept(IROH_HTTP_CONNECT_ALPN, upstream_proxy) .spawn(); - let this = Self { + Ok(Self { repo, router, state, metrics, - _n0des: n0des, _diagnostics: diagnostics_handle, - }; - Ok(this) + }) } pub fn state_updated(&self) -> Notified<'_> { @@ -247,28 
+233,17 @@ impl AuthHandler for StateWrapper { pub struct ConnectNode { endpoint: Endpoint, proxy: DownstreamProxy, - _n0des: Option>, } impl ConnectNode { - pub async fn new(repo: Repo) -> Result { - let n0des_api_secret = n0des_api_secret_from_env()?; - Self::with_n0des_api_secret(repo, n0des_api_secret).await - } - #[instrument("connect-node", skip_all)] - pub async fn with_n0des_api_secret( - repo: Repo, - n0des_api_secret: Option, - ) -> Result { + pub async fn new(repo: Repo) -> Result { let config = repo.config().await?; let secret_key = repo.connect_key().await?; let endpoint = build_endpoint(secret_key, &config).await?; - let n0des = build_n0des_client_opt(&endpoint, n0des_api_secret).await; let pool = DownstreamProxy::new(endpoint.clone(), Default::default()); Ok(Self { endpoint, - _n0des: n0des, proxy: pool, }) } @@ -622,52 +597,6 @@ async fn setup_diagnostics( Ok(Some((host, handle))) } -pub(crate) fn n0des_api_secret_from_env() -> Result> { - let api_secret_str = match std::env::var("N0DES_API_SECRET") { - Ok(s) => s, - Err(_) => match option_env!("BUILD_N0DES_API_SECRET") { - None => return Ok(None), - Some(s) => s.to_string(), - }, - }; - let api_secret = ApiSecret::from_str(&api_secret_str) - .context("Failed to parse n0des API secret from env variable N0DES_API_SECRET")?; - Ok(Some(api_secret)) -} - -pub(crate) async fn build_n0des_client_opt( - endpoint: &Endpoint, - api_secret: Option, -) -> Option> { - match api_secret { - None => { - info!("Disabling metrics collection: N0DES_API_SECRET is not set"); - None - } - Some(n0des_api_secret) => match build_n0des_client(endpoint, n0des_api_secret).await { - Ok(client) => Some(client), - Err(err) => { - warn!("Disabling metrics collection: Failed to connect to n0des: {err:#}"); - None - } - }, - } -} - -pub(crate) async fn build_n0des_client( - endpoint: &Endpoint, - api_secret: ApiSecret, -) -> Result> { - let remote_id = api_secret.remote.id; - debug!(remote=%remote_id.fmt_short(), "connecting to 
n0des endpoint"); - let client = iroh_services::Client::builder(endpoint) - .api_secret(api_secret)? - .build() - .await - .std_context("Failed to connect to n0des endpoint")?; - info!(remote=%remote_id.fmt_short(), "Connected to n0des endpoint for metrics collection"); - Ok(Arc::new(client)) -} #[cfg(test)] mod tests { From c80b58a2b2b8f7db2099acda3f18f90ffa3e77fa Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Fri, 17 Apr 2026 09:22:04 -0700 Subject: [PATCH 5/6] chore: apply cargo fmt --- lib/src/node.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/node.rs b/lib/src/node.rs index eb27641..2d43425 100644 --- a/lib/src/node.rs +++ b/lib/src/node.rs @@ -597,7 +597,6 @@ async fn setup_diagnostics( Ok(Some((host, handle))) } - #[cfg(test)] mod tests { use super::*; From 7e370f6cdee5c7b8f95e715d3ea6276c63adb9a0 Mon Sep 17 00:00:00 2001 From: Zach Smith Date: Fri, 17 Apr 2026 09:42:59 -0700 Subject: [PATCH 6/6] fix: bundle libayatana-indicator3 in AppImage libayatana-appindicator3 depends on libayatana-indicator3 at runtime. PR #141 bundled libayatana-appindicator3 but missed this transitive dep, so dlopen of appindicator3 still failed on distros without it installed. Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/bundle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bundle.yml b/.github/workflows/bundle.yml index c93e188..e42baa4 100644 --- a/.github/workflows/bundle.yml +++ b/.github/workflows/bundle.yml @@ -336,7 +336,7 @@ jobs: # panics immediately on startup with "Failed to load ayatana-appindicator3". # libdbusmenu-gtk3/glib are also missing for the same reason — they are # linked by appindicator but linuxdeploy never traversed that dep tree. 
- for lib_prefix in libayatana-appindicator3 libdbusmenu-gtk3 libdbusmenu-glib libayatana-ido3-0.4; do + for lib_prefix in libayatana-appindicator3 libayatana-indicator3 libdbusmenu-gtk3 libdbusmenu-glib libayatana-ido3-0.4; do find /usr/lib/x86_64-linux-gnu -maxdepth 1 -name "${lib_prefix}.so*" 2>/dev/null \ | xargs -I{} cp -P {} squashfs-root/usr/lib/ 2>/dev/null || true done