Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/bundle.yml
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,7 @@ jobs:
# panics immediately on startup with "Failed to load ayatana-appindicator3".
# libdbusmenu-gtk3/glib are also missing for the same reason — they are
# linked by appindicator but linuxdeploy never traversed that dep tree.
for lib_prefix in libayatana-appindicator3 libdbusmenu-gtk3 libdbusmenu-glib libayatana-ido3-0.4; do
for lib_prefix in libayatana-appindicator3 libayatana-indicator3 libdbusmenu-gtk3 libdbusmenu-glib libayatana-ido3-0.4; do
find /usr/lib/x86_64-linux-gnu -maxdepth 1 -name "${lib_prefix}.so*" 2>/dev/null \
| xargs -I{} cp -P {} squashfs-root/usr/lib/ 2>/dev/null || true
done
Expand Down
88 changes: 8 additions & 80 deletions lib/src/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ use iroh_proxy_utils::{
};
use iroh_relay::dns::{DnsProtocol, DnsResolver};
use iroh_relay::{RelayConfig, RelayMap};
use iroh_services::{ApiSecret, CLIENT_HOST_ALPN};
use n0_error::{Result, StackResultExt, StdResultExt};
use iroh_services::CLIENT_HOST_ALPN;
use n0_error::{Result, StdResultExt};
use tokio::{
net::TcpListener,
sync::futures::Notified,
Expand Down Expand Up @@ -55,25 +55,15 @@ pub struct ListenNode {
state: StateWrapper,
repo: Repo,
metrics: Arc<UpstreamMetrics>,
_n0des: Option<Arc<iroh_services::Client>>,
_diagnostics: Option<DiagnosticsHandle>,
}

impl ListenNode {
pub async fn new(repo: Repo) -> Result<Self> {
let n0des_api_secret = n0des_api_secret_from_env()?;
Self::with_n0des_api_secret(repo, n0des_api_secret).await
}

#[instrument("listen-node", skip_all)]
pub async fn with_n0des_api_secret(
repo: Repo,
n0des_api_secret: Option<ApiSecret>,
) -> Result<Self> {
pub async fn new(repo: Repo) -> Result<Self> {
let config = repo.config().await?;
let secret_key = repo.listen_key().await?;
let endpoint = build_endpoint(secret_key, &config).await?;
let n0des = build_n0des_client_opt(&endpoint, n0des_api_secret).await;
let state = repo.load_state().await?;

let upstream_proxy = UpstreamProxy::new(state.clone())?;
Expand All @@ -87,15 +77,13 @@ impl ListenNode {
.accept(CLIENT_HOST_ALPN, host)
.spawn();

let this = Self {
return Ok(Self {
repo,
router,
state,
metrics,
_n0des: n0des,
_diagnostics: Some(handle),
};
return Ok(this);
});
}
Ok(None) => None,
Err(err) => {
Expand All @@ -108,15 +96,13 @@ impl ListenNode {
.accept(IROH_HTTP_CONNECT_ALPN, upstream_proxy)
.spawn();

let this = Self {
Ok(Self {
repo,
router,
state,
metrics,
_n0des: n0des,
_diagnostics: diagnostics_handle,
};
Ok(this)
})
}

pub fn state_updated(&self) -> Notified<'_> {
Expand Down Expand Up @@ -247,28 +233,17 @@ impl AuthHandler for StateWrapper {
pub struct ConnectNode {
endpoint: Endpoint,
proxy: DownstreamProxy,
_n0des: Option<Arc<iroh_services::Client>>,
}

impl ConnectNode {
pub async fn new(repo: Repo) -> Result<Self> {
let n0des_api_secret = n0des_api_secret_from_env()?;
Self::with_n0des_api_secret(repo, n0des_api_secret).await
}

#[instrument("connect-node", skip_all)]
pub async fn with_n0des_api_secret(
repo: Repo,
n0des_api_secret: Option<ApiSecret>,
) -> Result<Self> {
pub async fn new(repo: Repo) -> Result<Self> {
let config = repo.config().await?;
let secret_key = repo.connect_key().await?;
let endpoint = build_endpoint(secret_key, &config).await?;
let n0des = build_n0des_client_opt(&endpoint, n0des_api_secret).await;
let pool = DownstreamProxy::new(endpoint.clone(), Default::default());
Ok(Self {
endpoint,
_n0des: n0des,
proxy: pool,
})
}
Expand Down Expand Up @@ -622,53 +597,6 @@ async fn setup_diagnostics(
Ok(Some((host, handle)))
}

/// Resolves the optional n0des API secret used to enable metrics collection.
///
/// Resolution order:
/// 1. The `N0DES_API_SECRET` environment variable, read at runtime.
/// 2. A `BUILD_N0DES_API_SECRET` value baked in at compile time via
///    `option_env!`, used only when the runtime variable is unset.
///
/// Returns `Ok(None)` when neither source provides a value; returns an
/// error only when a value is present but fails to parse as an `ApiSecret`.
pub(crate) fn n0des_api_secret_from_env() -> Result<Option<ApiSecret>> {
    let api_secret_str = match std::env::var("N0DES_API_SECRET") {
        Ok(s) => s,
        // Runtime variable missing (or not valid unicode): fall back to the
        // compile-time secret, if one was provided at build time.
        Err(_) => match option_env!("BUILD_N0DES_API_SECRET") {
            None => return Ok(None),
            Some(s) => s.to_string(),
        },
    };
    let api_secret = ApiSecret::from_str(&api_secret_str)
        .context("Failed to parse n0des API secret from env variable N0DES_API_SECRET")?;
    Ok(Some(api_secret))
}

/// Best-effort construction of a n0des client for metrics collection.
///
/// Returns `None` — after logging the reason — when either no API secret
/// was supplied or the connection attempt fails; a metrics outage must not
/// prevent the node from starting.
pub(crate) async fn build_n0des_client_opt(
    endpoint: &Endpoint,
    api_secret: Option<ApiSecret>,
) -> Option<Arc<iroh_services::Client>> {
    match api_secret {
        None => {
            info!("Disabling metrics collection: N0DES_API_SECRET is not set");
            None
        }
        // A connection failure is downgraded to "metrics disabled" rather
        // than propagated as an error.
        Some(n0des_api_secret) => match build_n0des_client(endpoint, n0des_api_secret).await {
            Ok(client) => Some(client),
            Err(err) => {
                warn!("Disabling metrics collection: Failed to connect to n0des: {err:#}");
                None
            }
        },
    }
}

/// Connects to the n0des endpoint identified by `api_secret` and returns a
/// shared client handle used for metrics collection.
///
/// # Errors
/// Fails when the client builder rejects the secret or when the connection
/// to the remote n0des endpoint cannot be established.
pub(crate) async fn build_n0des_client(
    endpoint: &Endpoint,
    api_secret: ApiSecret,
) -> Result<Arc<iroh_services::Client>> {
    // The remote node id is carried inside the secret; capture it up front
    // so both log lines below can reference it.
    let remote_id = api_secret.remote.id;
    debug!(remote=%remote_id.fmt_short(), "connecting to n0des endpoint");
    let client = iroh_services::Client::builder(endpoint)
        .api_secret(api_secret)?
        .build()
        .await
        .std_context("Failed to connect to n0des endpoint")?;
    info!(remote=%remote_id.fmt_short(), "Connected to n0des endpoint for metrics collection");
    Ok(Arc::new(client))
}

#[cfg(test)]
mod tests {
use super::*;
Expand Down
35 changes: 35 additions & 0 deletions lib/src/tunnels.rs
Original file line number Diff line number Diff line change
Expand Up @@ -249,6 +249,10 @@ impl TunnelService {
});
}
if !self.publish_tickets {
let current_ids: std::collections::HashSet<&str> =
tunnels.iter().map(|t| t.id.as_str()).collect();

// Sync state for each tunnel returned by the server.
for tunnel in &tunnels {
if let Ok(proxy_state) = proxy_state_from_summary(
&tunnel.id,
Expand All @@ -260,6 +264,37 @@ impl TunnelService {
warn!(tunnel_id = %tunnel.id, "Failed to store proxy state: {err:#}");
}
}

// Remove stale local entries that share host:port with a current tunnel
// but have a different resource_id. These accumulate when a tunnel is
// deleted and recreated with the same endpoint (new ID). Without this,
// the stale enabled entry causes tcp_proxy_exists to return true even
// when the current tunnel is disabled, allowing traffic through.
//
// Scoped to same-endpoint matches so we don't touch entries belonging
// to other projects with different endpoints.
for tunnel in &tunnels {
let Ok(data) = TcpProxyData::from_host_port_str(&strip_scheme(&tunnel.endpoint))
else {
continue;
};
let stale_ids: Vec<String> = self
.listen
.proxies()
.into_iter()
.filter(|p| {
Comment on lines +281 to +285
Copy link

Copilot AI Apr 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

self.listen.proxies() clones the full proxy list (via to_vec()) and is called once per tunnel, making this stale-scan O(tunnels × proxies) with repeated allocations. Consider pulling let proxies = self.listen.proxies(); out of the loop and computing all stale IDs in a single pass (e.g., precompute a set of current endpoints and collect matching stale IDs into a HashSet before removing).

Copilot uses AI. Check for mistakes.
!current_ids.contains(p.id())
&& p.info.service().host == data.host
&& p.info.service().port == data.port
})
.map(|p| p.id().to_string())
.collect();
for id in stale_ids {
if let Err(err) = self.listen.remove_proxy_state(&id).await {
warn!(tunnel_id = %id, "Failed to remove stale proxy state: {err:#}");
Copy link

Copilot AI Apr 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The structured log field tunnel_id is misleading here because id refers to a stale proxy/resource ID being removed, not a tunnel from the server list. Use a field name like resource_id/proxy_id (and keep tunnel_id for actual tunnel IDs) to avoid confusing diagnostics.

Suggested change
warn!(tunnel_id = %id, "Failed to remove stale proxy state: {err:#}");
warn!(proxy_id = %id, "Failed to remove stale proxy state: {err:#}");

Copilot uses AI. Check for mistakes.
}
}
}
}

Ok(tunnels)
Expand Down
3 changes: 1 addition & 2 deletions ui/src/views/proxies_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ pub fn TunnelCard(
let state = state.clone();
let tunnel_id = tunnel_id_for_toggle.clone();
async move {
let updated = state
state
.tunnel_service()
.set_enabled_active(&tunnel_id, next_enabled)
.await?;
Expand All @@ -374,7 +374,6 @@ pub fn TunnelCard(
.await;
}
}
state.upsert_tunnel(updated);
state.bump_tunnel_refresh();
n0_error::Ok(())
}
Expand Down
Loading