diff --git a/cmd/ingestor/db.go b/cmd/ingestor/db.go index 5304e33c..462d2f95 100644 --- a/cmd/ingestor/db.go +++ b/cmd/ingestor/db.go @@ -466,6 +466,19 @@ func applySchema(db *sql.DB) error { log.Println("[migration] observations.raw_hex column added") } + + // Migration: add multibyte capability columns to nodes/inactive_nodes (#903) + row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'multibyte_sup_v1'") + if row.Scan(&migDone) != nil { + log.Println("[migration] Adding multibyte_sup columns to nodes/inactive_nodes...") + db.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_sup INTEGER NOT NULL DEFAULT 0`) + db.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_evidence TEXT`) + db.Exec(`ALTER TABLE inactive_nodes ADD COLUMN multibyte_sup INTEGER NOT NULL DEFAULT 0`) + db.Exec(`ALTER TABLE inactive_nodes ADD COLUMN multibyte_evidence TEXT`) + db.Exec(`INSERT INTO _migrations (name) VALUES ('multibyte_sup_v1')`) + log.Println("[migration] multibyte_sup columns added") + } + // Migration: add last_packet_at column to observers (#last-packet-at) row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observers_last_packet_at_v1'") if row.Scan(&migDone) != nil { diff --git a/cmd/ingestor/db_test.go b/cmd/ingestor/db_test.go index 44b46b53..057ad2f1 100644 --- a/cmd/ingestor/db_test.go +++ b/cmd/ingestor/db_test.go @@ -485,6 +485,63 @@ func TestSchemaNoiseFloorIsReal(t *testing.T) { } } +func TestSchemaMultibyteSupColumns(t *testing.T) { + s, err := OpenStore(tempDBPath(t)) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + cols := map[string]string{} + rows, err := s.db.Query("PRAGMA table_info(nodes)") + if err != nil { + t.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var cid int + var colName, colType string + var notNull, pk int + var dflt interface{} + if rows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk) == nil { + cols[colName] = colType + } + } + + if ct, ok := cols["multibyte_sup"]; !ok { + t.Error("nodes.multibyte_sup column 
missing") + } else if ct != "INTEGER" { + t.Errorf("nodes.multibyte_sup type=%s, want INTEGER", ct) + } + if _, ok := cols["multibyte_evidence"]; !ok { + t.Error("nodes.multibyte_evidence column missing") + } + + inactiveCols := map[string]string{} + inactiveRows, err := s.db.Query("PRAGMA table_info(inactive_nodes)") + if err != nil { + t.Fatal(err) + } + defer inactiveRows.Close() + for inactiveRows.Next() { + var cid int + var colName, colType string + var notNull, pk int + var dflt interface{} + if inactiveRows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk) == nil { + inactiveCols[colName] = colType + } + } + if ct, ok := inactiveCols["multibyte_sup"]; !ok { + t.Error("inactive_nodes.multibyte_sup column missing") + } else if ct != "INTEGER" { + t.Errorf("inactive_nodes.multibyte_sup type=%s, want INTEGER", ct) + } + if _, ok := inactiveCols["multibyte_evidence"]; !ok { + t.Error("inactive_nodes.multibyte_evidence column missing") + } +} + func TestInsertTransmissionWithObserver(t *testing.T) { s, err := OpenStore(tempDBPath(t)) if err != nil { diff --git a/cmd/server/db.go b/cmd/server/db.go index dec43f36..5b219688 100644 --- a/cmd/server/db.go +++ b/cmd/server/db.go @@ -20,7 +20,8 @@ type DB struct { path string // filesystem path to the database file isV3 bool // v3 schema: observer_idx in observations (vs observer_id in v2) hasResolvedPath bool // observations table has resolved_path column - hasObsRawHex bool // observations table has raw_hex column (#881) + hasObsRawHex bool // observations table has raw_hex column (#881) + hasMultibyteSupCols bool // nodes table has multibyte_sup/multibyte_evidence columns (#903) // Channel list cache (60s TTL) — avoids repeated GROUP BY scans (#762) channelsCacheMu sync.Mutex @@ -82,6 +83,24 @@ func (db *DB) detectSchema() { } } } + + nodeRows, err := db.conn.Query("PRAGMA table_info(nodes)") + if err != nil { + return + } + defer nodeRows.Close() + for nodeRows.Next() { + var cid int + var colName string + var 
colType sql.NullString + var notNull, pk int + var dflt sql.NullString + if nodeRows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk) == nil { + if colName == "multibyte_sup" { + db.hasMultibyteSupCols = true + } + } + } } // transmissionBaseSQL returns the SELECT columns and JOIN clause for transmission-centric queries. @@ -790,7 +809,11 @@ func (db *DB) GetNodes(limit, offset int, role, search, before, lastHeard, sortB var total int db.conn.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM nodes %s", w), args...).Scan(&total) - querySQL := fmt.Sprintf("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert FROM nodes %s ORDER BY %s LIMIT ? OFFSET ?", w, order) + nodeColList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert" + if db.hasMultibyteSupCols { + nodeColList += ", multibyte_sup, multibyte_evidence" + } + querySQL := fmt.Sprintf("SELECT %s FROM nodes %s ORDER BY %s LIMIT ? OFFSET ?", nodeColList, w, order) qArgs := append(args, limit, offset) rows, err := db.conn.Query(querySQL, qArgs...) @@ -801,7 +824,7 @@ func (db *DB) GetNodes(limit, offset int, role, search, before, lastHeard, sortB nodes := make([]map[string]interface{}, 0) for rows.Next() { - n := scanNodeRow(rows) + n := db.scanNodeRow(rows) if n != nil { nodes = append(nodes, n) } @@ -816,8 +839,12 @@ func (db *DB) SearchNodes(query string, limit int) ([]map[string]interface{}, er if limit <= 0 { limit = 10 } - rows, err := db.conn.Query(`SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert - FROM nodes WHERE name LIKE ? OR public_key LIKE ? 
ORDER BY last_seen DESC LIMIT ?`, + colList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert" + if db.hasMultibyteSupCols { + colList += ", multibyte_sup, multibyte_evidence" + } + rows, err := db.conn.Query( + fmt.Sprintf("SELECT %s FROM nodes WHERE name LIKE ? OR public_key LIKE ? ORDER BY last_seen DESC LIMIT ?", colList), "%"+query+"%", query+"%", limit) if err != nil { return nil, err @@ -826,7 +853,7 @@ func (db *DB) SearchNodes(query string, limit int) ([]map[string]interface{}, er nodes := make([]map[string]interface{}, 0) for rows.Next() { - n := scanNodeRow(rows) + n := db.scanNodeRow(rows) if n != nil { nodes = append(nodes, n) } @@ -854,9 +881,12 @@ func (db *DB) GetNodeByPrefix(prefix string) (map[string]interface{}, bool, erro return nil, false, nil } } + prefixColList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert" + if db.hasMultibyteSupCols { + prefixColList += ", multibyte_sup, multibyte_evidence" + } rows, err := db.conn.Query( - `SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert - FROM nodes WHERE public_key LIKE ? LIMIT 2`, + fmt.Sprintf("SELECT %s FROM nodes WHERE public_key LIKE ? LIMIT 2", prefixColList), prefix+"%", ) if err != nil { @@ -866,7 +896,7 @@ func (db *DB) GetNodeByPrefix(prefix string) (map[string]interface{}, bool, erro var first map[string]interface{} count := 0 for rows.Next() { - n := scanNodeRow(rows) + n := db.scanNodeRow(rows) if n == nil { continue } @@ -885,13 +915,17 @@ func (db *DB) GetNodeByPrefix(prefix string) (map[string]interface{}, bool, erro // GetNodeByPubkey returns a single node. 
func (db *DB) GetNodeByPubkey(pubkey string) (map[string]interface{}, error) { - rows, err := db.conn.Query("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert FROM nodes WHERE public_key = ?", pubkey) + colList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert" + if db.hasMultibyteSupCols { + colList += ", multibyte_sup, multibyte_evidence" + } + rows, err := db.conn.Query(fmt.Sprintf("SELECT %s FROM nodes WHERE public_key = ?", colList), pubkey) if err != nil { return nil, err } defer rows.Close() if rows.Next() { - return scanNodeRow(rows), nil + return db.scanNodeRow(rows), nil } return nil, nil } @@ -1858,7 +1892,7 @@ func scanPacketRow(rows *sql.Rows) map[string]interface{} { } } -func scanNodeRow(rows *sql.Rows) map[string]interface{} { +func (db *DB) scanNodeRow(rows *sql.Rows) map[string]interface{} { var pk string var name, role, lastSeen, firstSeen sql.NullString var lat, lon sql.NullFloat64 @@ -1866,8 +1900,14 @@ func scanNodeRow(rows *sql.Rows) map[string]interface{} { var batteryMv sql.NullInt64 var temperatureC sql.NullFloat64 var foreign sql.NullInt64 + var multibyteSup sql.NullInt64 + var multibyteEvidence sql.NullString - if err := rows.Scan(&pk, &name, &role, &lat, &lon, &lastSeen, &firstSeen, &advertCount, &batteryMv, &temperatureC, &foreign); err != nil { + scanArgs := []interface{}{&pk, &name, &role, &lat, &lon, &lastSeen, &firstSeen, &advertCount, &batteryMv, &temperatureC, &foreign} + if db.hasMultibyteSupCols { + scanArgs = append(scanArgs, &multibyteSup, &multibyteEvidence) + } + if err := rows.Scan(scanArgs...); err != nil { return nil } m := map[string]interface{}{ @@ -1883,6 +1923,12 @@ func scanNodeRow(rows *sql.Rows) map[string]interface{} { "hash_size": nil, "hash_size_inconsistent": false, "foreign": foreign.Valid && foreign.Int64 != 0, + "multibyte_sup": int(multibyteSup.Int64), + } + if 
multibyteEvidence.Valid { + m["multibyte_evidence"] = multibyteEvidence.String + } else { + m["multibyte_evidence"] = nil } if batteryMv.Valid { m["battery_mv"] = int(batteryMv.Int64) diff --git a/cmd/server/db_test.go b/cmd/server/db_test.go index ceb13722..50599a56 100644 --- a/cmd/server/db_test.go +++ b/cmd/server/db_test.go @@ -2147,3 +2147,32 @@ func TestPerObservationRawHexEnrich(t *testing.T) { } } } + +func TestGetNodesReturnsMultibyteSupField(t *testing.T) { + conn, _ := sql.Open("sqlite", ":memory:") + conn.SetMaxOpenConns(1) + conn.Exec(`CREATE TABLE nodes ( + public_key TEXT PRIMARY KEY, name TEXT, role TEXT, + lat REAL, lon REAL, last_seen TEXT, first_seen TEXT, + advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL, + foreign_advert INTEGER DEFAULT 0, + multibyte_sup INTEGER NOT NULL DEFAULT 0, multibyte_evidence TEXT + )`) + conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen, first_seen) + VALUES ('aabb1122', 'TestRep', 'repeater', '2026-01-01T00:00:00Z', '2026-01-01T00:00:00Z')`) + db := &DB{conn: conn, hasMultibyteSupCols: true} + + nodes, _, _, err := db.GetNodes(10, 0, "", "", "", "", "", "") + if err != nil { + t.Fatal(err) + } + if len(nodes) == 0 { + t.Fatal("expected 1 node") + } + if _, ok := nodes[0]["multibyte_sup"]; !ok { + t.Error("multibyte_sup missing from GetNodes response") + } + if nodes[0]["multibyte_sup"] != 0 { + t.Errorf("multibyte_sup = %v, want 0", nodes[0]["multibyte_sup"]) + } +} diff --git a/cmd/server/multibyte_capability_test.go b/cmd/server/multibyte_capability_test.go index 6e48477c..f22827da 100644 --- a/cmd/server/multibyte_capability_test.go +++ b/cmd/server/multibyte_capability_test.go @@ -172,13 +172,13 @@ func TestMultiByteCapability_Unknown(t *testing.T) { } // TestMultiByteCapability_PrefixCollision tests that when two repeaters -// share the same prefix, one confirmed via advert, the other gets +// share the same 2-byte prefix, one confirmed via advert, the other gets // 
suspected (not confirmed) from path data alone. func TestMultiByteCapability_PrefixCollision(t *testing.T) { db := setupCapabilityTestDB(t) defer db.conn.Close() - // Two repeaters sharing 1-byte prefix "aa" + // Two repeaters sharing 2-byte prefix "aacc" db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", "aabb000000000001", "RepConfirmed", "repeater", recentTS(24)) db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", @@ -189,14 +189,15 @@ func TestMultiByteCapability_PrefixCollision(t *testing.T) { // RepConfirmed has a 2-byte advert addTestPacket(store, makeTestAdvert("aabb000000000001", 2)) - // A packet with 2-byte path containing 1-byte hop "aa" — both share this prefix + // A packet with hs=2 path containing 2-byte hop "aacc" — matches RepOther's + // 2-byte prefix. Hop length (2 bytes) correctly matches hash_size=2. pathByte := buildPathByte(2, 1) - rawHex := "01" + pathByte + "aa" + rawHex := "01" + pathByte + "aacc" pt := 1 pkt := &StoreTx{ RawHex: rawHex, PayloadType: &pt, - PathJSON: `["aa"]`, + PathJSON: `["aacc"]`, FirstSeen: recentTS(48), } addTestPacket(store, pkt) @@ -385,6 +386,106 @@ func TestMultiByteCapability_RoleColumnPopulated(t *testing.T) { } } +func TestGetMultibyteCapMap_Confirmed(t *testing.T) { + db := setupCapabilityTestDB(t) + defer db.conn.Close() + + store := NewPacketStore(db, nil) + store.cacheMu.Lock() + store.mbCapSnapshot = []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "confirmed", Evidence: "advert"}, + {PublicKey: "1122334455667788", Status: "suspected", Evidence: "path"}, + } + store.cacheMu.Unlock() + + m := store.GetMultibyteCapMap() + if e, ok := m["aabbccdd11223344"]; !ok || e.Status != "confirmed" || e.Evidence != "advert" { + t.Errorf("confirmed entry: got %+v, want confirmed/advert", e) + } + if e, ok := m["1122334455667788"]; !ok || e.Status != "suspected" || e.Evidence != "path" { + t.Errorf("suspected entry: got %+v, want 
suspected/path", e) + } +} + +func TestGetMultibyteCapMap_EmptyWhenNoSnapshot(t *testing.T) { + db := setupCapabilityTestDB(t) + defer db.conn.Close() + + store := NewPacketStore(db, nil) + m := store.GetMultibyteCapMap() + if len(m) != 0 { + t.Errorf("expected empty map before any analytics cycle, got %d entries", len(m)) + } +} + +func TestEnrichNodeWithMultibyte_Confirmed(t *testing.T) { + node := map[string]interface{}{"public_key": "aabb", "multibyte_sup": 0} + enrichNodeWithMultibyte(node, MultiByteCapEntry{Status: "confirmed", Evidence: "advert"}) + if node["multibyte_sup"] != 2 { + t.Errorf("multibyte_sup = %v, want 2", node["multibyte_sup"]) + } + if node["multibyte_evidence"] != "advert" { + t.Errorf("multibyte_evidence = %v, want advert", node["multibyte_evidence"]) + } +} + +func TestEnrichNodeWithMultibyte_Suspected(t *testing.T) { + node := map[string]interface{}{"public_key": "aabb", "multibyte_sup": 0} + enrichNodeWithMultibyte(node, MultiByteCapEntry{Status: "suspected", Evidence: "path"}) + if node["multibyte_sup"] != 1 { + t.Errorf("multibyte_sup = %v, want 1", node["multibyte_sup"]) + } +} + +func TestEnrichNodeWithMultibyte_ZeroEntryNoChange(t *testing.T) { + node := map[string]interface{}{"public_key": "aabb", "multibyte_sup": 0} + enrichNodeWithMultibyte(node, MultiByteCapEntry{}) // zero-value = unknown, no pubkey + if node["multibyte_sup"] != 0 { + t.Errorf("multibyte_sup = %v, want 0 (unchanged for unknown)", node["multibyte_sup"]) + } + if _, ok := node["multibyte_evidence"]; ok { + t.Error("multibyte_evidence should not be set for unknown entry") + } +} + +// TestMultiByteCapability_HopLengthMismatch tests that a 1-byte hop stored +// in a hs=2 packet (pre-#886 ingestor data) does NOT trigger suspected. +// The 1-byte prefix of a node must not match a malformed single-byte entry +// from a path that was incorrectly split into individual bytes. 
+func TestMultiByteCapability_HopLengthMismatch(t *testing.T) { + db := setupCapabilityTestDB(t) + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", + "daabccdd11223344", "LegacyNode", "repeater", recentTS(24)) + + store := NewPacketStore(db, nil) + + // Malformed packet: path_json has 1-byte hops but path_byte in raw_hex + // encodes hash_size=2 (pre-#886 ingestor stored path bytes individually). + // buildPathByte(2,1) gives a path byte with hs=2, hop_count=1. + pathByte := buildPathByte(2, 1) + // path_json has 1-byte hop "da" — matches 1-byte prefix of node "daab..." + // raw_hex says hash_size=2. + rawHex := "01" + pathByte + "da" + pt := 1 + pkt := &StoreTx{ + RawHex: rawHex, + PayloadType: &pt, + PathJSON: `["da"]`, + FirstSeen: recentTS(48), + } + addTestPacket(store, pkt) + + caps := store.computeMultiByteCapability(nil) + if len(caps) != 1 { + t.Fatalf("expected 1 entry, got %d", len(caps)) + } + if caps[0].Status != "unknown" { + t.Errorf("expected unknown (hop length mismatch should be filtered), got %s", caps[0].Status) + } +} + // TestMultiByteCapability_AdopterEvidenceTakesPrecedence tests that when // adopter data shows hashSize >= 2 but path evidence says "suspected", // the node is upgraded to "confirmed" (Bug 3, #754). 
diff --git a/cmd/server/routes.go b/cmd/server/routes.go index 253433df..f34fa083 100644 --- a/cmd/server/routes.go +++ b/cmd/server/routes.go @@ -1100,12 +1100,12 @@ func (s *Server) handleNodes(w http.ResponseWriter, r *http.Request) { } if s.store != nil { hashInfo := s.store.GetNodeHashSizeInfo() - mbCap := s.store.GetMultiByteCapMap() + mbCap := s.store.GetMultibyteCapMap() relayWindow := s.cfg.GetHealthThresholds().RelayActiveHours for _, node := range nodes { if pk, ok := node["public_key"].(string); ok { EnrichNodeWithHashSize(node, hashInfo[pk]) - EnrichNodeWithMultiByte(node, mbCap[pk]) + enrichNodeWithMultibyte(node, mbCap[pk]) if role, _ := node["role"].(string); role == "repeater" || role == "room" { info := s.store.GetRepeaterRelayInfo(pk, relayWindow) if info.LastRelayed != "" { @@ -1220,8 +1220,7 @@ func (s *Server) handleNodeDetail(w http.ResponseWriter, r *http.Request) { if s.store != nil { hashInfo := s.store.GetNodeHashSizeInfo() EnrichNodeWithHashSize(node, hashInfo[pubkey]) - mbCap := s.store.GetMultiByteCapMap() - EnrichNodeWithMultiByte(node, mbCap[pubkey]) + enrichNodeWithMultibyte(node, s.store.GetMultibyteCapMap()[pubkey]) if role, _ := node["role"].(string); role == "repeater" || role == "room" { ht := s.cfg.GetHealthThresholds() info := s.store.GetRepeaterRelayInfo(pubkey, ht.RelayActiveHours) @@ -2380,6 +2379,22 @@ func (s *Server) handleAudioLabBuckets(w http.ResponseWriter, r *http.Request) { // --- Helpers --- +// enrichNodeWithMultibyte sets multibyte_sup and multibyte_evidence on a node map +// from the in-memory analytics cache (avoids the need for DB writes from a ro connection). 
+func enrichNodeWithMultibyte(node map[string]interface{}, e MultiByteCapEntry) { + sup := 0 + switch e.Status { + case "confirmed": + sup = 2 + case "suspected": + sup = 1 + } + if sup > 0 { + node["multibyte_sup"] = sup + node["multibyte_evidence"] = e.Evidence + } +} + func writeJSON(w http.ResponseWriter, v interface{}) { w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(v); err != nil { diff --git a/cmd/server/store.go b/cmd/server/store.go index a9b6662d..108a126a 100644 --- a/cmd/server/store.go +++ b/cmd/server/store.go @@ -142,6 +142,7 @@ type PacketStore struct { rfCache map[string]*cachedResult // region → cached RF result topoCache map[string]*cachedResult // region → cached topology result hashCache map[string]*cachedResult // region → cached hash-sizes result + mbCapSnapshot []MultiByteCapEntry // latest computeMultiByteCapability result, under cacheMu collisionCache map[string]*cachedResult // cached hash-collisions result keyed by region ("" = global) chanCache map[string]*cachedResult // region → cached channels result distCache map[string]*cachedResult // region → cached distance result @@ -5852,7 +5853,11 @@ func (s *PacketStore) GetAnalyticsHashSizes(region string) map[string]interface{ } } } - result["multiByteCapability"] = s.computeMultiByteCapability(globalAdopterHS) + mbEntries := s.computeMultiByteCapability(globalAdopterHS) + result["multiByteCapability"] = mbEntries + s.cacheMu.Lock() + s.mbCapSnapshot = mbEntries + s.cacheMu.Unlock() s.cacheMu.Lock() s.hashCache[region] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)} @@ -6768,6 +6773,12 @@ func (s *PacketStore) computeMultiByteCapability(adopterHashSizes map[string]int if hs < 2 { continue } + // Hop length must match hash_size. Pre-#886 ingestor data stored path + // bytes individually (1-byte entries) even for hs=2 packets, so a + // 1-byte prefix could match a malformed hop in a hs=2 packet. 
+ if len(pfx)/2 != hs { + continue + } // This packet uses multi-byte hashes and contains this prefix as a hop for _, e := range entries { if hs > suspected[e.pubkey] { @@ -6847,6 +6858,19 @@ func (s *PacketStore) computeMultiByteCapability(adopterHashSizes map[string]int return result } +// GetMultibyteCapMap returns a pubkey→entry snapshot from the last analytics cycle. +// Used by routes to enrich node responses without a DB write (server conn is read-only). +func (s *PacketStore) GetMultibyteCapMap() map[string]MultiByteCapEntry { + s.cacheMu.Lock() + snap := s.mbCapSnapshot + s.cacheMu.Unlock() + m := make(map[string]MultiByteCapEntry, len(snap)) + for _, e := range snap { + m[e.PublicKey] = e + } + return m +} + // --- Bulk Health (in-memory) --- func (s *PacketStore) GetBulkHealth(limit int, region string) []map[string]interface{} { diff --git a/docs/api-spec.md b/docs/api-spec.md index 082b8861..08f0da54 100644 --- a/docs/api-spec.md +++ b/docs/api-spec.md @@ -308,7 +308,9 @@ Paginated node list with filtering. "hash_size": number | null, // latest hash size (1–3 bytes) "hash_size_inconsistent": boolean, // true if flip-flopping "hash_sizes_seen": [number] | undefined, // present only if >1 unique size seen - "last_heard": string (ISO) | undefined // from in-memory packets or path relay + "last_heard": string (ISO) | undefined, // from in-memory packets or path relay + "multibyte_sup": number, // 0 = unknown, 1 = suspected, 2 = confirmed multibyte capability + "multibyte_evidence": string | null // "advert" | "path" | null } ], "total": number, // total matching count (before pagination) @@ -463,7 +465,9 @@ Node detail page data. 
"advert_count": number, "hash_size": number | null, "hash_size_inconsistent": boolean, - "hash_sizes_seen": [number] | undefined + "hash_sizes_seen": [number] | undefined, + "multibyte_sup": number, // 0 = unknown, 1 = suspected, 2 = confirmed multibyte capability + "multibyte_evidence": string | null // "advert" | "path" | null }, "recentAdverts": [Packet] // last 20 packets for this node, newest first } diff --git a/docs/superpowers/plans/2026-04-25-multibyte-map-overlay.md b/docs/superpowers/plans/2026-04-25-multibyte-map-overlay.md new file mode 100644 index 00000000..11166344 --- /dev/null +++ b/docs/superpowers/plans/2026-04-25-multibyte-map-overlay.md @@ -0,0 +1,788 @@ +# Multibyte Map Overlay Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Add a map overlay that colors repeater markers by multibyte-capability status (confirmed / suspected / unknown), backed by a persisted DB column populated from the server's existing analytics computation. + +**Architecture:** The ingestor adds `multibyte_sup` + `multibyte_evidence` columns to the `nodes` table via a migration. The server's `PacketStore.persistMultiByteCapability()` upserts results from the already-running `computeMultiByteCapability()` analytics cycle into those columns (no-downgrade guard). `/api/nodes` passes the columns through to the frontend, which applies marker coloring when the new toggle is enabled. 
+ +**Tech Stack:** Go (server + ingestor), SQLite (shared DB), vanilla JS (map.js / Leaflet) + +--- + +## File Map + +| File | Change | +|---|---| +| `cmd/ingestor/db.go` | Add `multibyte_sup_v1` migration (ALTER TABLE nodes + inactive_nodes) | +| `cmd/ingestor/db_test.go` | Add schema test for new columns | +| `cmd/server/db.go` | Add `hasMultibyteSupCols` flag, update `detectSchema()`, convert `scanNodeRow` to DB method with conditional scanning, update three SELECT queries | +| `cmd/server/store.go` | Add `persistMultiByteCapability()`, wire into `GetHashSizes()` | +| `cmd/server/multibyte_capability_test.go` | Add tests for `persistMultiByteCapability()` | +| `public/map.js` | Add toggle to filters + UI, update `makeMarkerIcon` + `makeRepeaterLabelIcon` + `buildPopup` | + +--- + +## Task 1: Ingestor migration — add multibyte_sup columns + +**Files:** +- Modify: `cmd/ingestor/db.go` (after the `scope_name_v1` migration, around line 428) +- Modify: `cmd/ingestor/db_test.go` (add test after `TestSchemaNoiseFloorIsReal`) + +- [ ] **Step 1: Write failing test** + +Add to `cmd/ingestor/db_test.go` after the `TestSchemaNoiseFloorIsReal` function: + +```go +func TestSchemaMultibyteSupColumns(t *testing.T) { + s, err := OpenStore(tempDBPath(t)) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + cols := map[string]string{} + rows, err := s.db.Query("PRAGMA table_info(nodes)") + if err != nil { + t.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var cid int + var colName, colType string + var notNull, pk int + var dflt interface{} + if rows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk) == nil { + cols[colName] = colType + } + } + + if ct, ok := cols["multibyte_sup"]; !ok { + t.Error("nodes.multibyte_sup column missing") + } else if ct != "INTEGER" { + t.Errorf("nodes.multibyte_sup type=%s, want INTEGER", ct) + } + if _, ok := cols["multibyte_evidence"]; !ok { + t.Error("nodes.multibyte_evidence column missing") + } +} +``` + +- [ ] **Step 2: Run test 
to verify it fails** + +``` +cd cmd/ingestor && go test -run TestSchemaMultibyteSupColumns -v +``` + +Expected: FAIL — columns missing. + +- [ ] **Step 3: Add migration to `cmd/ingestor/db.go`** + +Locate the `scope_name_v1` migration block (around line 421). Add the following block immediately after it (after the closing `}`): + +```go +// Migration: add multibyte capability columns to nodes/inactive_nodes (#903) +row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'multibyte_sup_v1'") +if row.Scan(&migDone) != nil { + log.Println("[migration] Adding multibyte_sup columns to nodes/inactive_nodes...") + db.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_sup INTEGER NOT NULL DEFAULT 0`) + db.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_evidence TEXT`) + db.Exec(`ALTER TABLE inactive_nodes ADD COLUMN multibyte_sup INTEGER NOT NULL DEFAULT 0`) + db.Exec(`ALTER TABLE inactive_nodes ADD COLUMN multibyte_evidence TEXT`) + db.Exec(`INSERT INTO _migrations (name) VALUES ('multibyte_sup_v1')`) + log.Println("[migration] multibyte_sup columns added") +} +``` + +- [ ] **Step 4: Run test to verify it passes** + +``` +cd cmd/ingestor && go test -run TestSchemaMultibyteSupColumns -v +``` + +Expected: PASS. + +- [ ] **Step 5: Run full ingestor test suite** + +``` +cd cmd/ingestor && go test ./... 2>&1 | tail -5 +``` + +Expected: `ok` with no failures. + +- [ ] **Step 6: Commit** + +```bash +git add cmd/ingestor/db.go cmd/ingestor/db_test.go +git commit -m "feat(ingestor/db): add multibyte_sup migration to nodes table (#903)" +``` + +--- + +## Task 2: Server schema detection + node row enrichment + +**Files:** +- Modify: `cmd/server/db.go` + +The server opens the DB read-only and uses `detectSchema()` to discover columns. The `scanNodeRow` standalone function must become a method so it can check the `hasMultibyteSupCols` flag and conditionally scan. + +- [ ] **Step 1: Write failing test** + +Add to `cmd/server/db_test.go`. 
Find an existing test that calls `GetNodes` and add a new one that asserts `multibyte_sup` is present in the returned map: + +```go +func TestGetNodesReturnsMultibyteSupField(t *testing.T) { + conn, _ := sql.Open("sqlite", ":memory:") + conn.SetMaxOpenConns(1) + conn.Exec(`CREATE TABLE nodes ( + public_key TEXT PRIMARY KEY, name TEXT, role TEXT, + lat REAL, lon REAL, last_seen TEXT, first_seen TEXT, + advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL, + multibyte_sup INTEGER NOT NULL DEFAULT 0, multibyte_evidence TEXT + )`) + conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen, first_seen) + VALUES ('aabb1122', 'TestRep', 'repeater', '2026-01-01T00:00:00Z', '2026-01-01T00:00:00Z')`) + db := &DB{conn: conn, hasMultibyteSupCols: true} + + nodes, _, _, err := db.GetNodes(10, 0, "", "", "", "", "", "") + if err != nil { + t.Fatal(err) + } + if len(nodes) == 0 { + t.Fatal("expected 1 node") + } + if _, ok := nodes[0]["multibyte_sup"]; !ok { + t.Error("multibyte_sup missing from GetNodes response") + } + if nodes[0]["multibyte_sup"] != 0 { + t.Errorf("multibyte_sup = %v, want 0", nodes[0]["multibyte_sup"]) + } +} +``` + +- [ ] **Step 2: Run test to verify it fails** + +``` +cd cmd/server && go test -run TestGetNodesReturnsMultibyteSupField -v +``` + +Expected: FAIL — `hasMultibyteSupCols` field doesn't exist yet. 
+ +- [ ] **Step 3: Add `hasMultibyteSupCols` to `DB` struct** + +In `cmd/server/db.go`, add the field to the `DB` struct (around line 24): + +```go +type DB struct { + conn *sql.DB + path string + isV3 bool + hasResolvedPath bool + hasObsRawHex bool + hasScopeName bool + hasMultibyteSupCols bool // nodes.multibyte_sup column exists (#903) + + channelsCacheMu sync.Mutex + channelsCacheKey string + channelsCacheRes []map[string]interface{} + channelsCacheExp time.Time +} +``` + +- [ ] **Step 4: Add nodes PRAGMA check to `detectSchema()`** + +In `cmd/server/db.go`, at the end of `detectSchema()` (after the `txRows` block that ends around line 103), add: + +```go +nodeRows, err := db.conn.Query("PRAGMA table_info(nodes)") +if err != nil { + return +} +defer nodeRows.Close() +for nodeRows.Next() { + var cid int + var colName string + var colType sql.NullString + var notNull, pk int + var dflt sql.NullString + if nodeRows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk) == nil { + if colName == "multibyte_sup" { + db.hasMultibyteSupCols = true + } + } +} +``` + +- [ ] **Step 5: Convert `scanNodeRow` to a DB method with conditional scanning** + +Find `func scanNodeRow(rows *sql.Rows)` (around line 1829). 
Replace it entirely with: + +```go +func (db *DB) scanNodeRow(rows *sql.Rows) map[string]interface{} { + var pk string + var name, role, lastSeen, firstSeen sql.NullString + var lat, lon sql.NullFloat64 + var advertCount int + var batteryMv sql.NullInt64 + var temperatureC sql.NullFloat64 + var multibyteSup sql.NullInt64 + var multibyteEvidence sql.NullString + + scanArgs := []interface{}{&pk, &name, &role, &lat, &lon, &lastSeen, &firstSeen, &advertCount, &batteryMv, &temperatureC} + if db.hasMultibyteSupCols { + scanArgs = append(scanArgs, &multibyteSup, &multibyteEvidence) + } + if err := rows.Scan(scanArgs...); err != nil { + return nil + } + m := map[string]interface{}{ + "public_key": pk, + "name": nullStr(name), + "role": nullStr(role), + "lat": nullFloat(lat), + "lon": nullFloat(lon), + "last_seen": nullStr(lastSeen), + "first_seen": nullStr(firstSeen), + "advert_count": advertCount, + "last_heard": nullStr(lastSeen), + "hash_size": nil, + "hash_size_inconsistent": false, + "multibyte_sup": int(multibyteSup.Int64), // 0 when not scanned + } + if multibyteEvidence.Valid { + m["multibyte_evidence"] = multibyteEvidence.String + } else { + m["multibyte_evidence"] = nil + } + if batteryMv.Valid { + m["battery_mv"] = int(batteryMv.Int64) + } else { + m["battery_mv"] = nil + } + if temperatureC.Valid { + m["temperature_c"] = temperatureC.Float64 + } else { + m["temperature_c"] = nil + } + return m +} +``` + +- [ ] **Step 6: Update SELECT queries and call sites** + +In `cmd/server/db.go`, make three changes: + +**A. `GetNodes`** (around line 820) — replace the `querySQL` assignment: + +```go +nodeColList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c" +if db.hasMultibyteSupCols { + nodeColList += ", multibyte_sup, multibyte_evidence" +} +querySQL := fmt.Sprintf("SELECT %s FROM nodes %s ORDER BY %s LIMIT ? 
OFFSET ?", nodeColList, w, order) +``` + +Then change `n := scanNodeRow(rows)` to `n := db.scanNodeRow(rows)`. + +**B. `SearchNodes`** (around line 846) — replace the `rows` query and call: + +```go +colList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c" +if db.hasMultibyteSupCols { + colList += ", multibyte_sup, multibyte_evidence" +} +rows, err := db.conn.Query( + fmt.Sprintf("SELECT %s FROM nodes WHERE name LIKE ? OR public_key LIKE ? ORDER BY last_seen DESC LIMIT ?", colList), + "%"+query+"%", query+"%", limit) +``` + +Change `n := scanNodeRow(rows)` to `n := db.scanNodeRow(rows)`. + +**C. `GetNodeByPubkey`** (around line 866): + +```go +colList := "public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c" +if db.hasMultibyteSupCols { + colList += ", multibyte_sup, multibyte_evidence" +} +rows, err := db.conn.Query( + fmt.Sprintf("SELECT %s FROM nodes WHERE public_key = ?", colList), pubkey) +``` + +Change `return scanNodeRow(rows), nil` to `return db.scanNodeRow(rows), nil`. + +- [ ] **Step 7: Run test to verify it passes** + +``` +cd cmd/server && go test -run TestGetNodesReturnsMultibyteSupField -v +``` + +Expected: PASS. + +- [ ] **Step 8: Run full server test suite** + +``` +cd cmd/server && go test ./... 2>&1 | tail -10 +``` + +Expected: all pass. If `scanNodeRow` was referenced somewhere else as a standalone function, the compiler will catch it — fix those call sites to `db.scanNodeRow(rows)`. 
+ +- [ ] **Step 9: Commit** + +```bash +git add cmd/server/db.go cmd/server/db_test.go +git commit -m "feat(server/db): expose multibyte_sup in node API response (#903)" +``` + +--- + +## Task 3: persistMultiByteCapability + wire into analytics + +**Files:** +- Modify: `cmd/server/store.go` +- Modify: `cmd/server/multibyte_capability_test.go` + +- [ ] **Step 1: Write failing test** + +Add to `cmd/server/multibyte_capability_test.go`: + +```go +// setupCapabilityTestDBWithMultibyteCols returns a DB with multibyte columns. +func setupCapabilityTestDBWithMultibyteCols(t *testing.T) *DB { + t.Helper() + db := setupCapabilityTestDB(t) + db.conn.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_sup INTEGER NOT NULL DEFAULT 0`) + db.conn.Exec(`ALTER TABLE nodes ADD COLUMN multibyte_evidence TEXT`) + db.hasMultibyteSupCols = true + return db +} + +func TestPersistMultiByteCapability_Confirmed(t *testing.T) { + db := setupCapabilityTestDBWithMultibyteCols(t) + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", + "aabbccdd11223344", "RepA", "repeater", recentTS(1)) + + store := NewPacketStore(db, nil) + entries := []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "confirmed", Evidence: "advert"}, + } + store.persistMultiByteCapability(entries) + + var sup int + var evidence sql.NullString + db.conn.QueryRow("SELECT multibyte_sup, multibyte_evidence FROM nodes WHERE public_key = ?", + "aabbccdd11223344").Scan(&sup, &evidence) + + if sup != 2 { + t.Errorf("multibyte_sup = %d, want 2", sup) + } + if !evidence.Valid || evidence.String != "advert" { + t.Errorf("multibyte_evidence = %v, want 'advert'", evidence) + } +} + +func TestPersistMultiByteCapability_Suspected(t *testing.T) { + db := setupCapabilityTestDBWithMultibyteCols(t) + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", + "aabbccdd11223344", "RepA", "repeater", recentTS(1)) + + 
store := NewPacketStore(db, nil) + entries := []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "suspected", Evidence: "path"}, + } + store.persistMultiByteCapability(entries) + + var sup int + db.conn.QueryRow("SELECT multibyte_sup FROM nodes WHERE public_key = ?", + "aabbccdd11223344").Scan(&sup) + + if sup != 1 { + t.Errorf("multibyte_sup = %d, want 1", sup) + } +} + +func TestPersistMultiByteCapability_NoDowngrade(t *testing.T) { + db := setupCapabilityTestDBWithMultibyteCols(t) + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen, multibyte_sup, multibyte_evidence) VALUES (?, ?, ?, ?, ?, ?)", + "aabbccdd11223344", "RepA", "repeater", recentTS(1), 2, "advert") + + store := NewPacketStore(db, nil) + // Attempt to downgrade confirmed → suspected + entries := []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "suspected", Evidence: "path"}, + } + store.persistMultiByteCapability(entries) + + var sup int + var evidence sql.NullString + db.conn.QueryRow("SELECT multibyte_sup, multibyte_evidence FROM nodes WHERE public_key = ?", + "aabbccdd11223344").Scan(&sup, &evidence) + + if sup != 2 { + t.Errorf("multibyte_sup = %d after downgrade attempt, want 2 (no downgrade)", sup) + } + if !evidence.Valid || evidence.String != "advert" { + t.Errorf("multibyte_evidence = %v after downgrade attempt, want 'advert'", evidence) + } +} + +func TestPersistMultiByteCapability_UnknownSkipped(t *testing.T) { + db := setupCapabilityTestDBWithMultibyteCols(t) + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", + "aabbccdd11223344", "RepA", "repeater", recentTS(1)) + + store := NewPacketStore(db, nil) + entries := []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "unknown", Evidence: ""}, + } + store.persistMultiByteCapability(entries) + + var sup int + db.conn.QueryRow("SELECT multibyte_sup FROM nodes WHERE public_key = ?", + 
"aabbccdd11223344").Scan(&sup) + + if sup != 0 { + t.Errorf("multibyte_sup = %d after unknown entry, want 0 (unchanged)", sup) + } +} + +func TestPersistMultiByteCapability_NoOpWhenColsMissing(t *testing.T) { + db := setupCapabilityTestDB(t) // no multibyte cols, hasMultibyteSupCols = false + defer db.conn.Close() + + db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)", + "aabbccdd11223344", "RepA", "repeater", recentTS(1)) + + store := NewPacketStore(db, nil) + entries := []MultiByteCapEntry{ + {PublicKey: "aabbccdd11223344", Status: "confirmed", Evidence: "advert"}, + } + // Must not panic or error when columns don't exist + store.persistMultiByteCapability(entries) +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +``` +cd cmd/server && go test -run TestPersistMultiByteCapability -v +``` + +Expected: FAIL — `persistMultiByteCapability` undefined. + +- [ ] **Step 3: Add `persistMultiByteCapability` to `cmd/server/store.go`** + +Add the function directly after `computeMultiByteCapability` (after line 6322, before `// --- Bulk Health`): + +```go +// persistMultiByteCapability upserts confirmed/suspected capability status into +// the nodes table. Status only moves forward (0→1→2); confirmed is never +// overwritten by suspected or unknown. Unknown entries are skipped entirely. +// No-op when hasMultibyteSupCols is false (DB not yet migrated). +func (s *PacketStore) persistMultiByteCapability(entries []MultiByteCapEntry) { + if !s.db.hasMultibyteSupCols { + return + } + for _, e := range entries { + var sup int + switch e.Status { + case "confirmed": + sup = 2 + case "suspected": + sup = 1 + default: + continue // unknown — nothing to write + } + var evidence interface{} + if e.Evidence != "" { + evidence = e.Evidence + } + s.db.conn.Exec( + "UPDATE nodes SET multibyte_sup = ?, multibyte_evidence = ? WHERE public_key = ? 
AND multibyte_sup < ?",
+			sup, evidence, e.PublicKey, sup,
+		)
+	}
+}
+```
+
+- [ ] **Step 4: Wire into `GetHashSizes()` in `cmd/server/store.go`**
+
+Find the block around line 5419:
+
+```go
+result["multiByteCapability"] = s.computeMultiByteCapability(adopterHS)
+```
+
+Replace with:
+
+```go
+entries := s.computeMultiByteCapability(adopterHS)
+result["multiByteCapability"] = entries
+s.persistMultiByteCapability(entries)
+```
+
+- [ ] **Step 5: Run tests to verify they pass**
+
+```
+cd cmd/server && go test -run TestPersistMultiByteCapability -v
+```
+
+Expected: all 5 PASS.
+
+- [ ] **Step 6: Run full server test suite**
+
+```
+cd cmd/server && go test ./... 2>&1 | tail -10
+```
+
+Expected: all pass.
+
+- [ ] **Step 7: Commit**
+
+```bash
+git add cmd/server/store.go cmd/server/multibyte_capability_test.go
+git commit -m "feat(server): persist multibyte capability status to nodes table (#903)"
+```
+
+---
+
+## Task 4: Frontend — toggle + marker styling
+
+**Files:**
+- Modify: `public/map.js`
+
+- [ ] **Step 1: Add `multibyteOverlay` to filters state**
+
+In `public/map.js`, find the `filters` declaration (line 12). Add `multibyteOverlay` to it:
+
+```js
+let filters = { repeater: true, companion: true, room: true, sensor: true, observer: true, lastHeard: '30d', neighbors: false, clusters: false, hashLabels: localStorage.getItem('meshcore-map-hash-labels') !== 'false', statusFilter: localStorage.getItem('meshcore-map-status-filter') || 'all', byteSize: localStorage.getItem('meshcore-map-byte-filter') || 'all', multibyteOverlay: localStorage.getItem('meshcore-map-multibyte') === 'true' };
+```
+
+- [ ] **Step 2: Add checkbox to map controls HTML**
+
+In `public/map.js`, find the Byte Size fieldset (around line 114). Add the checkbox line immediately after the `</div>` that closes the `mcByteFilter` div (after line 121):
+
+```js
+<label><input type="checkbox" id="mcMultibyte"> Multibyte overlay</label> <!-- TODO(review): HTML was stripped from this doc; match the surrounding controls markup. The id must be mcMultibyte to match Step 3. -->
+``` + +- [ ] **Step 3: Wire the change event listener** + +Find where the existing filter event listeners are registered (around line 285, near `mcLastHeard`). Add: + +```js +document.getElementById('mcMultibyte').addEventListener('change', function(e) { + filters.multibyteOverlay = e.target.checked; + localStorage.setItem('meshcore-map-multibyte', e.target.checked); + renderMarkers(); +}); +``` + +- [ ] **Step 4: Update `makeMarkerIcon` to accept and apply multibyte styling** + +In `public/map.js`, find `function makeMarkerIcon(role, isStale, isAlsoObserver)` (line 28). Replace it with: + +```js +function makeMarkerIcon(role, isStale, isAlsoObserver, mbSup) { + const s = ROLE_STYLE[role] || ROLE_STYLE.companion; + const size = s.radius * 2 + 4; + const c = size / 2; + + // Multibyte overlay color overrides (only when mbSup is a number, not null/undefined) + let fill = s.color; + let stroke = '#fff'; + let strokeExtra = ''; + let svgOpacity = 1; + if (mbSup !== null && mbSup !== undefined) { + if (mbSup >= 2) { + fill = '#22c55e'; stroke = '#16a34a'; + } else if (mbSup >= 1) { + fill = '#86efac'; stroke = '#22c55e'; strokeExtra = ' stroke-dasharray="3,2"'; + } else { + svgOpacity = 0.45; + } + } + + let path; + switch (s.shape) { + case 'diamond': + path = `