From 9a3f3282b949b454ca3135312d8096cb1be8d4ad Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 21 Feb 2026 15:36:22 +0530 Subject: [PATCH 01/15] feat: implement GossipSub v1.3 support with extensions and improved peer scoring --- libp2p/pubsub/extensions.py | 512 +++++++++++++++++++++++++++++++++++ libp2p/pubsub/gossipsub.py | 308 ++++++++++++++++++++- libp2p/pubsub/pb/rpc.proto | 48 ++++ libp2p/pubsub/pb/rpc_pb2.py | 70 ++--- libp2p/pubsub/pb/rpc_pb2.pyi | 126 ++++++++- libp2p/pubsub/pubsub.py | 33 ++- 6 files changed, 1059 insertions(+), 38 deletions(-) create mode 100644 libp2p/pubsub/extensions.py diff --git a/libp2p/pubsub/extensions.py b/libp2p/pubsub/extensions.py new file mode 100644 index 000000000..f2f2aa96f --- /dev/null +++ b/libp2p/pubsub/extensions.py @@ -0,0 +1,512 @@ +""" +GossipSub v1.3 Extensions Control Message support. + +Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md +extensions.proto: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/extensions/extensions.proto + +Design mirrors the go-libp2p reference implementation (pubsub/extensions.go in +libp2p/go-libp2p-pubsub). + +Key spec rules implemented here: + 1. Extensions control message MUST be in the FIRST message on the stream. + 2. Extensions control message MUST NOT be sent more than once per peer. + 3. A second Extensions control message from the same peer is misbehaviour. + 4. Peers MUST ignore unknown extensions (forward-compatible). +""" + +from __future__ import annotations + +from dataclasses import ( + dataclass, + field, +) +import logging + +from libp2p.peer.id import ( + ID, +) + +from .pb import ( + rpc_pb2, +) + +logger = logging.getLogger("libp2p.pubsub.extensions") + + +@dataclass +class PeerExtensions: + """ + Describes the set of GossipSub v1.3 extensions that a peer supports. + + Each field corresponds to one optional extension. 
When we receive a peer's + ``ControlExtensions`` protobuf we decode it into a ``PeerExtensions`` + instance. When we build our own hello packet we encode our + ``PeerExtensions`` into the outgoing ``ControlExtensions`` protobuf. + + Adding a new extension: + 1. Add a ``bool`` field here (default ``False``). + 2. Set the field in :meth:`from_control_extensions`. + 3. Populate the field in :meth:`to_control_extensions`. + 4. Add any per-peer activation logic in :class:`ExtensionsState`. + """ + + # Topic Observation extension (GossipSub v1.3 Topic Observation proposal). + # https://ethresear.ch/t/gossipsub-topic-observation-proposed-gossipsub-1-3/20907 + topic_observation: bool = False + + # testExtension – field 6492434 – used exclusively for cross-implementation + # interoperability testing (go-libp2p / rust-libp2p / py-libp2p). + test_extension: bool = False + + @classmethod + def from_control_extensions(cls, ext: rpc_pb2.ControlExtensions) -> PeerExtensions: + """ + Decode a wire ``ControlExtensions`` protobuf into a ``PeerExtensions``. + + Unknown fields in ``ext`` are silently ignored per spec rule 3 + ("Peers MUST ignore unknown extensions"). + """ + return cls( + topic_observation=ext.topicObservation, + test_extension=ext.testExtension, + ) + + def to_control_extensions(self) -> rpc_pb2.ControlExtensions: + """ + Encode this ``PeerExtensions`` into a wire ``ControlExtensions`` protobuf. + + Only fields that are ``True`` are set; unset optional proto fields are + omitted from the serialised bytes (proto2 semantics). 
+ """ + kwargs: dict[str, bool] = {} + if self.topic_observation: + kwargs["topicObservation"] = True + if self.test_extension: + kwargs["testExtension"] = True + return rpc_pb2.ControlExtensions(**kwargs) + + def has_any(self) -> bool: + """Return True if the local peer supports at least one extension.""" + return self.topic_observation or self.test_extension + + def supports_topic_observation(self) -> bool: + return self.topic_observation + + def supports_test_extension(self) -> bool: + return self.test_extension + + +@dataclass +class ExtensionsState: + """ + Per-router state for the GossipSub v1.3 extension exchange protocol. + + Mirrors ``extensionsState`` in go-libp2p's ``extensions.go``. + + Lifecycle (per peer): + 1. ``build_hello_extensions(peer_id)`` is called when we open a stream + and are about to send the first message. It mutates the hello RPC + in-place, adding ``control.extensions`` when appropriate, and records + that we have sent extensions to this peer. + 2. ``handle_rpc(rpc, peer_id)`` is called on every incoming RPC. + - For the *first* RPC from a peer it records their extensions. + - For subsequent RPCs it checks for a duplicate extensions field and + calls ``report_misbehaviour`` if one is found. + + The ``report_misbehaviour`` callback is expected to apply a peer-score + penalty (analogous to go-libp2p's ``reportMisbehavior``). + """ + + # Extensions we advertise to other peers. + my_extensions: PeerExtensions = field(default_factory=PeerExtensions) + + # Extensions we have received from each peer (populated on first RPC). + _peer_extensions: dict[ID, PeerExtensions] = field( + default_factory=dict, init=False, repr=False + ) + + # Set of peer IDs to whom we have already sent the extensions control message. + # Used to enforce the "at most once" rule on the sending side. + _sent_extensions: set[ID] = field(default_factory=set, init=False, repr=False) + + # Optional callback invoked when a peer sends a duplicate extensions message. 
+ # Signature: report_misbehaviour(peer_id: ID) -> None + _report_misbehaviour: object = field(default=None, init=False, repr=False) + + def set_report_misbehaviour(self, callback: object) -> None: + """ + Register the callback that penalises misbehaving peers. + + :param callback: callable(peer_id: ID) -> None + """ + self._report_misbehaviour = callback + + # ------------------------------------------------------------------ + # Sending side + # ------------------------------------------------------------------ + + def build_hello_extensions(self, peer_id: ID, hello: rpc_pb2.RPC) -> rpc_pb2.RPC: + """ + Attach our ``ControlExtensions`` to *hello* if this is a v1.3 peer and + we support at least one extension. + + Per spec rule 1: "If a peer supports any extension, the Extensions + control message MUST be included in the first message on the stream." + + Per spec rule 2: "It MUST NOT be sent more than once." + + This method MUST be called exactly once per peer, before the hello + packet is written to the stream. + + :param peer_id: the remote peer we are greeting. + :param hello: the RPC packet being constructed (mutated in-place). + :return: the (possibly mutated) RPC packet. + """ + if not self.my_extensions.has_any(): + # Nothing to advertise – still record that we did our part so that + # the "sent" tracking is consistent. + self._sent_extensions.add(peer_id) + return hello + + # Ensure control sub-message exists. + if not hello.HasField("control"): + hello.control.CopyFrom(rpc_pb2.ControlMessage()) + + hello.control.extensions.CopyFrom(self.my_extensions.to_control_extensions()) + + self._sent_extensions.add(peer_id) + logger.debug( + "Sent extensions to peer %s: topic_observation=%s test_extension=%s", + peer_id, + self.my_extensions.topic_observation, + self.my_extensions.test_extension, + ) + + # If we already received their extensions (unlikely race on the first + # message, but handled for correctness), activate the shared features. 
+ if peer_id in self._peer_extensions: + self._activate_peer(peer_id) + + return hello + + # ------------------------------------------------------------------ + # Receiving side + # ------------------------------------------------------------------ + + def handle_rpc(self, rpc: rpc_pb2.RPC, peer_id: ID) -> None: + """ + Process the extensions portion of an incoming RPC. + + Called for every incoming RPC. On the very first call for a given + peer this records the peer's extensions; on subsequent calls it checks + for a duplicate ``control.extensions`` field. + + :param rpc: the full incoming RPC message. + :param peer_id: the peer who sent the RPC. + """ + if peer_id not in self._peer_extensions: + # This is the first RPC from this peer. + peer_ext = self._extract_peer_extensions(rpc) + self._peer_extensions[peer_id] = peer_ext + + logger.debug( + "Received extensions from peer %s: topic_observation=%s " + "test_extension=%s", + peer_id, + peer_ext.topic_observation, + peer_ext.test_extension, + ) + + # If we have already sent our extensions, the exchange is complete. + if peer_id in self._sent_extensions: + self._activate_peer(peer_id) + else: + # We already have this peer's extensions. A second + # ``control.extensions`` field is a protocol violation. + if self._rpc_has_extensions(rpc): + logger.warning( + "Peer %s sent a duplicate Extensions control message – " + "this is a protocol violation (GossipSub v1.3 spec rule 2).", + peer_id, + ) + if callable(self._report_misbehaviour): + self._report_misbehaviour(peer_id) # type: ignore[operator] + + # ------------------------------------------------------------------ + # Peer lifecycle + # ------------------------------------------------------------------ + + def remove_peer(self, peer_id: ID) -> None: + """ + Clean up all extension state for a disconnected peer. + + :param peer_id: the peer that disconnected. 
+ """ + self._peer_extensions.pop(peer_id, None) + self._sent_extensions.discard(peer_id) + + # ------------------------------------------------------------------ + # Queries + # ------------------------------------------------------------------ + + def peer_supports_topic_observation(self, peer_id: ID) -> bool: + """ + Return True if *peer_id* has advertised the Topic Observation extension. + + :param peer_id: the remote peer to query. + """ + ext = self._peer_extensions.get(peer_id) + return ext is not None and ext.topic_observation + + def peer_supports_test_extension(self, peer_id: ID) -> bool: + """ + Return True if *peer_id* has advertised the test extension. + + :param peer_id: the remote peer to query. + """ + ext = self._peer_extensions.get(peer_id) + return ext is not None and ext.test_extension + + def both_support_topic_observation(self, peer_id: ID) -> bool: + """ + Return True if both this node and *peer_id* support Topic Observation. + + Feature activation is only valid when both sides have advertised + support (per GossipSub v1.3 spec section on extension behaviour). + + :param peer_id: the remote peer to query. + """ + return ( + self.my_extensions.topic_observation + and self.peer_supports_topic_observation(peer_id) + ) + + def both_support_test_extension(self, peer_id: ID) -> bool: + """ + Return True if both this node and *peer_id* support the test extension. + + :param peer_id: the remote peer to query. + """ + return self.my_extensions.test_extension and self.peer_supports_test_extension( + peer_id + ) + + def get_peer_extensions(self, peer_id: ID) -> PeerExtensions | None: + """ + Return the extensions advertised by *peer_id*, or ``None`` if we have + not yet received the peer's first message. + + :param peer_id: the remote peer to query. + """ + return self._peer_extensions.get(peer_id) + + def sent_extensions_to(self, peer_id: ID) -> bool: + """ + Return True if we have already sent extensions to *peer_id*. 
+ + :param peer_id: the remote peer to query. + """ + return peer_id in self._sent_extensions + + # ------------------------------------------------------------------ + # Internal helpers + # ------------------------------------------------------------------ + + @staticmethod + def _rpc_has_extensions(rpc: rpc_pb2.RPC) -> bool: + """Return True if *rpc* carries a ``control.extensions`` field.""" + return rpc.HasField("control") and rpc.control.HasField("extensions") + + @staticmethod + def _extract_peer_extensions(rpc: rpc_pb2.RPC) -> PeerExtensions: + """ + Decode the peer's extensions from an RPC, returning an empty + ``PeerExtensions`` if none are present. + """ + if ExtensionsState._rpc_has_extensions(rpc): + return PeerExtensions.from_control_extensions(rpc.control.extensions) + return PeerExtensions() + + def _activate_peer(self, peer_id: ID) -> None: + """ + Called once both sides have exchanged extensions. Logs the active + feature set; subclasses / callers can extend this for bookkeeping. + + :param peer_id: the peer whose extension handshake just completed. + """ + peer_ext = self._peer_extensions[peer_id] + if self.my_extensions.topic_observation and peer_ext.topic_observation: + logger.debug("Topic Observation extension active with peer %s.", peer_id) + if self.my_extensions.test_extension and peer_ext.test_extension: + logger.debug("Test extension active with peer %s.", peer_id) + + +# --------------------------------------------------------------------------- +# Topic Observation state (per router) +# --------------------------------------------------------------------------- + + +class TopicObservationState: + """ + Manages the Topic Observation extension state for a single GossipSub router. + + Spec: https://ethresear.ch/t/gossipsub-topic-observation-proposed-gossipsub-1-3/20907 + + Two directions: + + * **Outbound (we are the observer):** We send ``OBSERVE`` to subscribing + peers and receive ``IHAVE`` notifications. 
We do NOT receive full + message payloads unless we explicitly request them. + + * **Inbound (we are the subscriber):** We receive ``OBSERVE`` / ``UNOBSERVE`` + from observing peers and send ``IHAVE`` to them when new messages arrive. + + The actual IHAVE emission is handled in ``GossipSub.publish()`` so that + notification is immediate (not deferred to the heartbeat) per the spec. + """ + + def __init__(self) -> None: + # Topics we are currently observing (outbound). + # topic -> set of subscriber peer IDs we sent OBSERVE to. + self._observing: dict[str, set[ID]] = {} + + # Peers that are observing us (inbound). + # topic -> set of observer peer IDs. + self._observers: dict[str, set[ID]] = {} + + # ------------------------------------------------------------------ + # Outbound: this node is an observer + # ------------------------------------------------------------------ + + def add_observing(self, topic: str, subscriber_peer: ID) -> None: + """ + Record that we are observing *topic* via *subscriber_peer*. + + Called after we emit an OBSERVE control message. + + :param topic: the topic we sent OBSERVE for. + :param subscriber_peer: the subscribing peer we sent OBSERVE to. + """ + self._observing.setdefault(topic, set()).add(subscriber_peer) + + def remove_observing(self, topic: str, subscriber_peer: ID) -> None: + """ + Record that we stopped observing *topic* via *subscriber_peer*. + + Called after we emit an UNOBSERVE control message. + + :param topic: the topic we sent UNOBSERVE for. + :param subscriber_peer: the peer we sent UNOBSERVE to. + """ + peers = self._observing.get(topic) + if peers is not None: + peers.discard(subscriber_peer) + if not peers: + del self._observing[topic] + + def is_observing(self, topic: str) -> bool: + """ + Return True if we are currently observing *topic*. + + :param topic: the topic to query. 
+ """ + return bool(self._observing.get(topic)) + + # ------------------------------------------------------------------ + # Inbound: remote peers are observing us + # ------------------------------------------------------------------ + + def add_observer(self, topic: str, observer_peer: ID) -> None: + """ + Record that *observer_peer* wants to observe *topic* from us. + + Called when we handle an incoming OBSERVE control message. + + :param topic: the topic the peer wants to observe. + :param observer_peer: the peer that sent us the OBSERVE. + """ + self._observers.setdefault(topic, set()).add(observer_peer) + logger.debug( + "Peer %s is now observing topic '%s' via us.", observer_peer, topic + ) + + def remove_observer(self, topic: str, observer_peer: ID) -> None: + """ + Remove *observer_peer* from the observer list for *topic*. + + Called when we handle an incoming UNOBSERVE control message. + + :param topic: the topic the peer wants to stop observing. + :param observer_peer: the peer that sent us the UNOBSERVE. + """ + peers = self._observers.get(topic) + if peers is not None: + peers.discard(observer_peer) + if not peers: + del self._observers[topic] + logger.debug( + "Peer %s stopped observing topic '%s' via us.", + observer_peer, + topic, + ) + + def get_observers(self, topic: str) -> set[ID]: + """ + Return the set of peers that are currently observing *topic* from us. + + :param topic: the topic to query. + :return: a copy of the observer set (empty set if none). + """ + return set(self._observers.get(topic, set())) + + def remove_peer(self, peer_id: ID) -> None: + """ + Clean up all Topic Observation state for a disconnected peer. + + :param peer_id: the peer that disconnected. 
+ """ + for topic in list(self._observers): + self._observers[topic].discard(peer_id) + if not self._observers[topic]: + del self._observers[topic] + + for topic in list(self._observing): + self._observing[topic].discard(peer_id) + if not self._observing[topic]: + del self._observing[topic] + + # ------------------------------------------------------------------ + # TODO for contributor: + # Implement the following methods to complete the outbound observer path. + # ------------------------------------------------------------------ + + def get_observing_topics(self) -> set[str]: + """ + Return the set of topics this node is currently observing (outbound). + + Implementation hint: + Return ``set(self._observing.keys())``. + + :return: set of topic strings we sent OBSERVE for. + """ + # TODO: Return the set of topics from self._observing. + raise NotImplementedError( + "get_observing_topics() is left as an easy task for contributors. " + "Hint: return set(self._observing.keys())" + ) + + def get_subscriber_peers_for_topic(self, topic: str) -> set[ID]: + """ + Return the set of subscriber peers we sent OBSERVE to for *topic*. + + Implementation hint: + Return a copy of ``self._observing.get(topic, set())``. + + :param topic: the topic to query. + :return: set of subscriber peer IDs we are observing through. + """ + # TODO: Return a copy of self._observing.get(topic, set()). + raise NotImplementedError( + "get_subscriber_peers_for_topic() is left as an easy task for " + "contributors. 
Hint: return set(self._observing.get(topic, set()))" + ) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 9149cd1c3..295d2d734 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -43,6 +43,11 @@ from .exceptions import ( NoPubsubAttached, ) +from .extensions import ( + ExtensionsState, + PeerExtensions, + TopicObservationState, +) from .mcache import ( MessageCache, ) @@ -64,6 +69,9 @@ PROTOCOL_ID = TProtocol("/meshsub/1.0.0") PROTOCOL_ID_V11 = TProtocol("/meshsub/1.1.0") PROTOCOL_ID_V12 = TProtocol("/meshsub/1.2.0") +# GossipSub v1.3: Extensions Control Message +# Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md +PROTOCOL_ID_V13 = TProtocol("/meshsub/1.3.0") PROTOCOL_ID_V20 = TProtocol("/meshsub/2.0.0") logger = logging.getLogger("libp2p.pubsub.gossipsub") @@ -108,6 +116,11 @@ class GossipSub(IPubsubRouter, Service): int # Maximum number of message IDs to track per peer in IDONTWANT lists ) + # Gossipsub v1.3 – Extensions Control Message + # Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md + extensions_state: ExtensionsState + topic_observation: TopicObservationState + # Gossipsub v2.0 adaptive features adaptive_gossip_enabled: bool network_health_score: float # 0.0 (poor) to 1.0 (excellent) @@ -151,6 +164,10 @@ def __init__( max_messages_per_topic_per_second: float = 10.0, eclipse_protection_enabled: bool = True, min_mesh_diversity_ips: int = 3, + # GossipSub v1.3 – Extensions Control Message + # Pass a PeerExtensions instance to advertise your supported extensions + # to remote peers in the first message on every new stream. 
+ my_extensions: PeerExtensions | None = None, ) -> None: self.protocols = list(protocols) self.pubsub = None @@ -197,6 +214,23 @@ def __init__( self.dont_send_message_ids = dict() self.max_idontwant_messages = max_idontwant_messages + # Gossipsub v1.3 – Extensions Control Message + # ExtensionsState tracks: + # - which extensions we advertise (my_extensions) + # - which extensions each peer has advertised (_peer_extensions) + # - whether we have already sent our extensions to a peer (_sent_extensions) + self.extensions_state = ExtensionsState( + my_extensions=my_extensions or PeerExtensions() + ) + # Wire up the misbehaviour reporter after scorer is initialised. + self.extensions_state.set_report_misbehaviour( + self._report_extensions_misbehaviour + ) + + # Topic Observation extension state (per router). + # Tracks observers (inbound) and topics we are observing (outbound). + self.topic_observation = TopicObservationState() + # Gossipsub v2.0 adaptive features self.adaptive_gossip_enabled = adaptive_gossip_enabled self.network_health_score = 1.0 # Start optimistic @@ -223,6 +257,24 @@ def supports_scoring(self, peer_id: ID) -> bool: return self.peer_protocol.get(peer_id) in ( PROTOCOL_ID_V11, PROTOCOL_ID_V12, + PROTOCOL_ID_V13, + PROTOCOL_ID_V20, + ) + + def supports_v13_features(self, peer_id: ID) -> bool: + """ + Check if *peer_id* negotiated the GossipSub v1.3 protocol. + + v1.3 is required for the Extensions Control Message mechanism and the + Topic Observation extension. A peer that negotiated v1.3 (or later) + MUST have received (and sent) the Extensions control message in the + first stream message. + + :param peer_id: The peer to check. + :return: True if peer negotiated ``/meshsub/1.3.0`` or later. 
+ """ + return self.peer_protocol.get(peer_id) in ( + PROTOCOL_ID_V13, PROTOCOL_ID_V20, ) @@ -282,6 +334,7 @@ def add_peer(self, peer_id: ID, protocol_id: TProtocol | None) -> None: PROTOCOL_ID, PROTOCOL_ID_V11, PROTOCOL_ID_V12, + PROTOCOL_ID_V13, PROTOCOL_ID_V20, floodsub.PROTOCOL_ID, ): @@ -325,6 +378,12 @@ def remove_peer(self, peer_id: ID) -> None: # Clean up security state self._cleanup_security_state(peer_id) + # GossipSub v1.3: clean up extension exchange state for this peer + self.extensions_state.remove_peer(peer_id) + + # Topic Observation: clean up observer / observing state for this peer + self.topic_observation.remove_peer(peer_id) + async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: """ Invoked to process control messages in the RPC envelope. @@ -341,6 +400,12 @@ async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: control_message = rpc.control + # GossipSub v1.3: process Extensions control message BEFORE dispatching + # other control messages. This must happen on every incoming RPC so + # that the "at most once" duplicate-detection logic runs correctly. 
+ if self.supports_v13_features(sender_peer_id): + self.extensions_state.handle_rpc(rpc, sender_peer_id) + # Relay each rpc control message to the appropriate handler if control_message.ihave: for ihave in control_message.ihave: @@ -358,6 +423,15 @@ async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: for idontwant in control_message.idontwant: await self.handle_idontwant(idontwant, sender_peer_id) + # GossipSub v1.3 – Topic Observation extension + if self.supports_v13_features(sender_peer_id): + if control_message.observe: + for observe in control_message.observe: + await self.handle_observe(observe, sender_peer_id) + if control_message.unobserve: + for unobserve in control_message.unobserve: + await self.handle_unobserve(unobserve, sender_peer_id) + async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None: """Invoked to forward a new message that has been validated.""" # Security checks for Gossipsub 2.0 @@ -401,6 +475,11 @@ async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None: # Send IDONTWANT to mesh peers about this message await self._emit_idontwant_for_message(msg_id, pubsub_msg.topicIDs) + # GossipSub v1.3 – Topic Observation: immediately notify observers with IHAVE. + # Unlike the heartbeat gossip, notification is sent right after receiving + # a message so observers get near-real-time awareness of new messages. 
+ await self._notify_observers(pubsub_msg.topicIDs, msg_id) + for peer_id in peers_gen: if self.pubsub is None: raise NoPubsubAttached @@ -922,7 +1001,13 @@ def _get_in_topic_gossipsub_peers_from_minus( peer_id for peer_id in self.pubsub.peer_topics[topic] if self.peer_protocol.get(peer_id) - in (PROTOCOL_ID, PROTOCOL_ID_V11, PROTOCOL_ID_V12, PROTOCOL_ID_V20) + in ( + PROTOCOL_ID, + PROTOCOL_ID_V11, + PROTOCOL_ID_V12, + PROTOCOL_ID_V13, + PROTOCOL_ID_V20, + ) } if backoff_check: # filter out peers that are in back off for this topic @@ -1416,6 +1501,227 @@ async def handle_idontwant( self.max_idontwant_messages, ) + # ------------------------------------------------------------------ # + # GossipSub v1.3 – Topic Observation extension handlers # + # ------------------------------------------------------------------ # + + async def handle_observe( + self, observe_msg: rpc_pb2.ControlObserve, sender_peer_id: ID + ) -> None: + """ + Handle an incoming OBSERVE control message. + + An OBSERVE message is sent by an *observer* peer that wants to receive + IHAVE notifications for ``topicID`` without being a full subscriber. + After this call, every time a new message for ``topicID`` arrives we + will send an IHAVE to *sender_peer_id* immediately (not at the next + heartbeat). + + Per the Topic Observation spec, only peers that: + 1. Negotiated ``/meshsub/1.3.0`` (checked by the caller), AND + 2. Advertised the ``topicObservation`` extension in their first message + should be permitted to send OBSERVE. + + :param observe_msg: The OBSERVE control message. + :param sender_peer_id: ID of the peer that sent the OBSERVE. + """ + topic: str = observe_msg.topicID + if not topic: + logger.debug( + "Received OBSERVE with empty topicID from peer %s, ignoring.", + sender_peer_id, + ) + return + + # Only honour OBSERVE if the peer advertised topic_observation support. 
+ if not self.extensions_state.peer_supports_topic_observation(sender_peer_id): + logger.debug( + "Peer %s sent OBSERVE but did not advertise topic_observation " + "extension – ignoring.", + sender_peer_id, + ) + return + + self.topic_observation.add_observer(topic, sender_peer_id) + logger.debug( + "OBSERVE: peer %s is now observing topic '%s'.", sender_peer_id, topic + ) + + async def handle_unobserve( + self, unobserve_msg: rpc_pb2.ControlUnobserve, sender_peer_id: ID + ) -> None: + """ + Handle an incoming UNOBSERVE control message. + + Stops sending IHAVE notifications to *sender_peer_id* for ``topicID``. + + :param unobserve_msg: The UNOBSERVE control message. + :param sender_peer_id: ID of the peer that sent the UNOBSERVE. + """ + topic: str = unobserve_msg.topicID + if not topic: + logger.debug( + "Received UNOBSERVE with empty topicID from peer %s, ignoring.", + sender_peer_id, + ) + return + + self.topic_observation.remove_observer(topic, sender_peer_id) + logger.debug( + "UNOBSERVE: peer %s stopped observing topic '%s'.", + sender_peer_id, + topic, + ) + + # ------------------------------------------------------------------ # + # GossipSub v1.3 – Topic Observation extension emitters # + # ------------------------------------------------------------------ # + + async def emit_observe(self, topic: str, to_peer: ID) -> None: + """ + Emit an OBSERVE control message to *to_peer* for *topic*. + + Call this when this node wants to observe *topic* via a subscribing + peer. After sending OBSERVE, *to_peer* should begin sending IHAVE + to us when new messages arrive in *topic*. + + :param topic: The topic to start observing. + :param to_peer: The subscribing peer to send OBSERVE to. 
+ """ + observe_msg = rpc_pb2.ControlObserve(topicID=topic) + control_msg = rpc_pb2.ControlMessage() + control_msg.observe.extend([observe_msg]) + + await self.emit_control_message(control_msg, to_peer) + self.topic_observation.add_observing(topic, to_peer) + logger.debug("OBSERVE sent: topic='%s' to peer %s.", topic, to_peer) + + async def emit_unobserve(self, topic: str, to_peer: ID) -> None: + """ + Emit an UNOBSERVE control message to *to_peer* for *topic*. + + Call this to stop observing *topic* via *to_peer*. + + :param topic: The topic to stop observing. + :param to_peer: The subscribing peer to send UNOBSERVE to. + """ + unobserve_msg = rpc_pb2.ControlUnobserve(topicID=topic) + control_msg = rpc_pb2.ControlMessage() + control_msg.unobserve.extend([unobserve_msg]) + + await self.emit_control_message(control_msg, to_peer) + self.topic_observation.remove_observing(topic, to_peer) + logger.debug("UNOBSERVE sent: topic='%s' to peer %s.", topic, to_peer) + + async def _notify_observers(self, topic_ids: Iterable[str], msg_id: bytes) -> None: + """ + Immediately send an IHAVE to every observer of each topic in + *topic_ids* when a new message arrives. + + Unlike the gossip heartbeat, this notification is *immediate* so that + observers get near-real-time awareness (per the Topic Observation spec). + Observers are not expected to reply with IWANT in this flow; they use + the IHAVE purely as a presence notification. + + :param topic_ids: Topics the new message belongs to. + :param msg_id: The message ID to include in the IHAVE notifications. 
+ """ + if self.pubsub is None: + return + pubsub = self.pubsub # narrow type for pyrefly / mypy + + msg_id_str = str(msg_id) + + for topic in topic_ids: + observers = self.topic_observation.get_observers(topic) + if not observers: + continue + + for observer_peer in observers: + if observer_peer not in pubsub.peers: + continue + await self.emit_ihave(topic, [msg_id_str], observer_peer) + logger.debug( + "Topic Observation: sent IHAVE(topic='%s', msg_id=%s) " + "to observer %s.", + topic, + msg_id_str, + observer_peer, + ) + + # ------------------------------------------------------------------ # + # GossipSub v1.3 – Extensions misbehaviour reporting # + # ------------------------------------------------------------------ # + + def _report_extensions_misbehaviour(self, peer_id: ID) -> None: + """ + Apply a peer-score penalty when a peer sends a duplicate Extensions + control message (violates GossipSub v1.3 spec rule 2). + + Mirrors go-libp2p's ``reportMisbehavior`` callback. + + :param peer_id: The misbehaving peer. + """ + if self.scorer is not None: + self.scorer.penalize_behavior(peer_id, 1.0) + logger.warning( + "Applied score penalty to peer %s for sending duplicate " + "Extensions control message (GossipSub v1.3 violation).", + peer_id, + ) + + # ------------------------------------------------------------------ # + # GossipSub v1.3 – TODO for contributors + # ------------------------------------------------------------------ # + + async def start_observing_topic(self, topic: str) -> None: + """ + Start observing *topic* by sending OBSERVE to all in-topic v1.3 peers + that support the Topic Observation extension. + + This is the high-level API for callers that want to become an observer. + Internally it picks suitable subscriber peers and calls + :meth:`emit_observe` for each of them. + + Implementation hints: + 1. Check ``self.pubsub`` is not None. + 2. Get the peers subscribed to *topic* from + ``self.pubsub.peer_topics.get(topic, set())``. + 3. 
For each peer: only send OBSERVE if + ``self.supports_v13_features(peer)`` is True AND + ``self.extensions_state.both_support_topic_observation(peer)`` + is True. + 4. Await ``self.emit_observe(topic, peer)`` for each qualifying peer. + + :param topic: The topic to start observing. + """ + # TODO (contributor): implement start_observing_topic. + # See implementation hints in the docstring above. + raise NotImplementedError( + "start_observing_topic() is left as an easy task for contributors. " + "See the docstring for step-by-step implementation hints." + ) + + async def stop_observing_topic(self, topic: str) -> None: + """ + Stop observing *topic* by sending UNOBSERVE to all peers we previously + sent OBSERVE to for *topic*. + + Implementation hints: + 1. Get the set of peers we sent OBSERVE to via + ``self.topic_observation.get_subscriber_peers_for_topic(topic)``. + (Note: this calls the TODO method in TopicObservationState.) + 2. Await ``self.emit_unobserve(topic, peer)`` for each of them. + + :param topic: The topic to stop observing. + """ + # TODO (contributor): implement stop_observing_topic. + # See implementation hints in the docstring above. + raise NotImplementedError( + "stop_observing_topic() is left as an easy task for contributors. " + "See the docstring for step-by-step implementation hints." + ) + def _track_peer_ip(self, peer_id: ID) -> None: """ Track the IP address of a peer for colocation scoring. diff --git a/libp2p/pubsub/pb/rpc.proto b/libp2p/pubsub/pb/rpc.proto index f48df92f2..45008a232 100644 --- a/libp2p/pubsub/pb/rpc.proto +++ b/libp2p/pubsub/pb/rpc.proto @@ -1,4 +1,7 @@ // Modified from https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto +// Updated with GossipSub v1.3 Extensions Control Message support. 
+// Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md +// extensions.proto: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/extensions/extensions.proto syntax = "proto2"; @@ -15,6 +18,12 @@ message RPC { optional ControlMessage control = 3; optional bytes senderRecord = 4; + + // Canonical Extensions register their top-level RPC messages here. + + // Experimental Extensions MUST use field numbers larger than 0x200000 + // so they are encoded with at least 4 bytes (per GossipSub v1.3 spec). + optional TestExtension testExtension = 6492434; } message Message { @@ -32,6 +41,27 @@ message ControlMessage { repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; repeated ControlIDontWant idontwant = 5; + + // GossipSub v1.3: Extensions control message (MUST be in first message, + // MUST NOT be sent more than once per peer). + // Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md + optional ControlExtensions extensions = 6; + + // Topic Observation extension control messages. + repeated ControlObserve observe = 7; + repeated ControlUnobserve unobserve = 8; +} + +// ControlExtensions advertises which v1.3 extensions the sending peer supports. +// Peers MUST ignore unknown fields (forward-compatible per spec). +// Field numbers for experimental extensions MUST be > 0x200000. +message ControlExtensions { + // Set to true if the peer supports the Topic Observation extension. + optional bool topicObservation = 1; + + // Experimental extensions use field numbers > 0x200000. + // testExtension: used for cross-implementation interop testing (go-libp2p compat). + optional bool testExtension = 6492434; } message ControlIHave { @@ -57,6 +87,24 @@ message ControlIDontWant { repeated bytes messageIDs = 1; } +// ControlObserve: Topic Observation extension. +// Sent by an observer to start receiving IHAVE notifications for a topic +// without being a full subscriber. 
(GossipSub v1.3 Topic Observation extension) +message ControlObserve { + optional string topicID = 1; +} + +// ControlUnobserve: Topic Observation extension. +// Sent by an observer to stop receiving IHAVE notifications for a topic. +message ControlUnobserve { + optional string topicID = 1; +} + +// TestExtension: used for interoperability testing of the v1.3 extension +// mechanism between implementations (go-libp2p, rust-libp2p, py-libp2p). +// An empty message — its presence on the wire is the signal. +message TestExtension {} + message PeerInfo { optional bytes peerID = 1; optional bytes signedPeerRecord = 2; diff --git a/libp2p/pubsub/pb/rpc_pb2.py b/libp2p/pubsub/pb/rpc_pb2.py index 03043ab98..4edee2a4e 100644 --- a/libp2p/pubsub/pb/rpc_pb2.py +++ b/libp2p/pubsub/pb/rpc_pb2.py @@ -4,11 +4,11 @@ # source: libp2p/pubsub/pb/rpc.proto # Protobuf Python Version: 5.29.3 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder _runtime_version.ValidateProtobufRuntimeVersion( _runtime_version.Domain.PUBLIC, 5, @@ -24,7 +24,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xca\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x12\x14\n\x0csenderRecord\x18\x04 \x01(\x0c\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 
\x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xe0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\x12.\n\tidontwant\x18\x05 \x03(\x0b\x32\x1b.pubsub.pb.ControlIDontWant\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"&\n\x10\x43ontrolIDontWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\x0c\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 \x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xfe\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 
\x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x12\x14\n\x0csenderRecord\x18\x04 \x01(\x0c\x12\x32\n\rtestExtension\x18\x92\xa2\x8c\x03 \x01(\x0b\x32\x18.pubsub.pb.TestExtension\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xee\x02\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\x12.\n\tidontwant\x18\x05 \x03(\x0b\x32\x1b.pubsub.pb.ControlIDontWant\x12\x30\n\nextensions\x18\x06 \x01(\x0b\x32\x1c.pubsub.pb.ControlExtensions\x12*\n\x07observe\x18\x07 \x03(\x0b\x32\x19.pubsub.pb.ControlObserve\x12.\n\tunobserve\x18\x08 \x03(\x0b\x32\x1b.pubsub.pb.ControlUnobserve\"G\n\x11\x43ontrolExtensions\x12\x18\n\x10topicObservation\x18\x01 \x01(\x08\x12\x18\n\rtestExtension\x18\x92\xa2\x8c\x03 \x01(\x08\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"&\n\x10\x43ontrolIDontWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\x0c\"!\n\x0e\x43ontrolObserve\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"#\n\x10\x43ontrolUnobserve\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"\x0f\n\rTestExtension\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 
\x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -32,33 +32,41 @@ if not _descriptor._USE_C_DESCRIPTORS: DESCRIPTOR._loaded_options = None _globals['_RPC']._serialized_start=42 - _globals['_RPC']._serialized_end=244 - _globals['_RPC_SUBOPTS']._serialized_start=199 - _globals['_RPC_SUBOPTS']._serialized_end=244 - _globals['_MESSAGE']._serialized_start=246 - _globals['_MESSAGE']._serialized_end=351 - _globals['_CONTROLMESSAGE']._serialized_start=354 - _globals['_CONTROLMESSAGE']._serialized_end=578 - _globals['_CONTROLIHAVE']._serialized_start=580 - _globals['_CONTROLIHAVE']._serialized_end=631 - _globals['_CONTROLIWANT']._serialized_start=633 - _globals['_CONTROLIWANT']._serialized_end=667 - _globals['_CONTROLGRAFT']._serialized_start=669 - _globals['_CONTROLGRAFT']._serialized_end=700 - _globals['_CONTROLPRUNE']._serialized_start=702 - _globals['_CONTROLPRUNE']._serialized_end=786 - _globals['_CONTROLIDONTWANT']._serialized_start=788 - _globals['_CONTROLIDONTWANT']._serialized_end=826 - _globals['_PEERINFO']._serialized_start=828 - _globals['_PEERINFO']._serialized_end=880 - _globals['_TOPICDESCRIPTOR']._serialized_start=883 - _globals['_TOPICDESCRIPTOR']._serialized_end=1274 - 
_globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_start=1016 - _globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_end=1140 - _globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_start=1102 - _globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_end=1140 - _globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_start=1143 - _globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_end=1274 - _globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_start=1231 - _globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_end=1274 + _globals['_RPC']._serialized_end=296 + _globals['_RPC_SUBOPTS']._serialized_start=251 + _globals['_RPC_SUBOPTS']._serialized_end=296 + _globals['_MESSAGE']._serialized_start=298 + _globals['_MESSAGE']._serialized_end=403 + _globals['_CONTROLMESSAGE']._serialized_start=406 + _globals['_CONTROLMESSAGE']._serialized_end=772 + _globals['_CONTROLEXTENSIONS']._serialized_start=774 + _globals['_CONTROLEXTENSIONS']._serialized_end=845 + _globals['_CONTROLIHAVE']._serialized_start=847 + _globals['_CONTROLIHAVE']._serialized_end=898 + _globals['_CONTROLIWANT']._serialized_start=900 + _globals['_CONTROLIWANT']._serialized_end=934 + _globals['_CONTROLGRAFT']._serialized_start=936 + _globals['_CONTROLGRAFT']._serialized_end=967 + _globals['_CONTROLPRUNE']._serialized_start=969 + _globals['_CONTROLPRUNE']._serialized_end=1053 + _globals['_CONTROLIDONTWANT']._serialized_start=1055 + _globals['_CONTROLIDONTWANT']._serialized_end=1093 + _globals['_CONTROLOBSERVE']._serialized_start=1095 + _globals['_CONTROLOBSERVE']._serialized_end=1128 + _globals['_CONTROLUNOBSERVE']._serialized_start=1130 + _globals['_CONTROLUNOBSERVE']._serialized_end=1165 + _globals['_TESTEXTENSION']._serialized_start=1167 + _globals['_TESTEXTENSION']._serialized_end=1182 + _globals['_PEERINFO']._serialized_start=1184 + _globals['_PEERINFO']._serialized_end=1236 + _globals['_TOPICDESCRIPTOR']._serialized_start=1239 + _globals['_TOPICDESCRIPTOR']._serialized_end=1630 + 
_globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_start=1372 + _globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_end=1496 + _globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_start=1458 + _globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_end=1496 + _globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_start=1499 + _globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_end=1630 + _globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_start=1587 + _globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_end=1630 # @@protoc_insertion_point(module_scope) diff --git a/libp2p/pubsub/pb/rpc_pb2.pyi b/libp2p/pubsub/pb/rpc_pb2.pyi index 405273f9d..956df5041 100644 --- a/libp2p/pubsub/pb/rpc_pb2.pyi +++ b/libp2p/pubsub/pb/rpc_pb2.pyi @@ -1,7 +1,11 @@ """ @generated by mypy-protobuf. Do not edit manually! isort:skip_file -Modified from https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto""" +Modified from https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto +Updated with GossipSub v1.3 Extensions Control Message support. +Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md +extensions.proto: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/extensions/extensions.proto +""" import builtins import collections.abc @@ -45,6 +49,7 @@ class RPC(google.protobuf.message.Message): PUBLISH_FIELD_NUMBER: builtins.int CONTROL_FIELD_NUMBER: builtins.int SENDERRECORD_FIELD_NUMBER: builtins.int + TESTEXTENSION_FIELD_NUMBER: builtins.int senderRecord: builtins.bytes @property def subscriptions(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___RPC.SubOpts]: ... @@ -52,6 +57,14 @@ class RPC(google.protobuf.message.Message): def publish(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message]: ... @property def control(self) -> global___ControlMessage: ... 
+ @property + def testExtension(self) -> global___TestExtension: + """Canonical Extensions register their top-level RPC messages here. + + Experimental Extensions MUST use field numbers larger than 0x200000 + so they are encoded with at least 4 bytes (per GossipSub v1.3 spec). + """ + def __init__( self, *, @@ -59,9 +72,10 @@ class RPC(google.protobuf.message.Message): publish: collections.abc.Iterable[global___Message] | None = ..., control: global___ControlMessage | None = ..., senderRecord: builtins.bytes | None = ..., + testExtension: global___TestExtension | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["control", b"control", "senderRecord", b"senderRecord"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["control", b"control", "publish", b"publish", "senderRecord", b"senderRecord", "subscriptions", b"subscriptions"]) -> None: ... + def HasField(self, field_name: typing.Literal["control", b"control", "senderRecord", b"senderRecord", "testExtension", b"testExtension"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["control", b"control", "publish", b"publish", "senderRecord", b"senderRecord", "subscriptions", b"subscriptions", "testExtension", b"testExtension"]) -> None: ... global___RPC = RPC @@ -106,6 +120,9 @@ class ControlMessage(google.protobuf.message.Message): GRAFT_FIELD_NUMBER: builtins.int PRUNE_FIELD_NUMBER: builtins.int IDONTWANT_FIELD_NUMBER: builtins.int + EXTENSIONS_FIELD_NUMBER: builtins.int + OBSERVE_FIELD_NUMBER: builtins.int + UNOBSERVE_FIELD_NUMBER: builtins.int @property def ihave(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlIHave]: ... @property @@ -116,6 +133,19 @@ class ControlMessage(google.protobuf.message.Message): def prune(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlPrune]: ... 
@property def idontwant(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlIDontWant]: ... + @property + def extensions(self) -> global___ControlExtensions: + """GossipSub v1.3: Extensions control message (MUST be in first message, + MUST NOT be sent more than once per peer). + Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md + """ + + @property + def observe(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlObserve]: + """Topic Observation extension control messages.""" + + @property + def unobserve(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlUnobserve]: ... def __init__( self, *, @@ -124,11 +154,43 @@ class ControlMessage(google.protobuf.message.Message): graft: collections.abc.Iterable[global___ControlGraft] | None = ..., prune: collections.abc.Iterable[global___ControlPrune] | None = ..., idontwant: collections.abc.Iterable[global___ControlIDontWant] | None = ..., + extensions: global___ControlExtensions | None = ..., + observe: collections.abc.Iterable[global___ControlObserve] | None = ..., + unobserve: collections.abc.Iterable[global___ControlUnobserve] | None = ..., ) -> None: ... - def ClearField(self, field_name: typing.Literal["graft", b"graft", "idontwant", b"idontwant", "ihave", b"ihave", "iwant", b"iwant", "prune", b"prune"]) -> None: ... + def HasField(self, field_name: typing.Literal["extensions", b"extensions"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["extensions", b"extensions", "graft", b"graft", "idontwant", b"idontwant", "ihave", b"ihave", "iwant", b"iwant", "observe", b"observe", "prune", b"prune", "unobserve", b"unobserve"]) -> None: ... global___ControlMessage = ControlMessage +@typing.final +class ControlExtensions(google.protobuf.message.Message): + """ControlExtensions advertises which v1.3 extensions the sending peer supports. 
+ Peers MUST ignore unknown fields (forward-compatible per spec). + Field numbers for experimental extensions MUST be > 0x200000. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TOPICOBSERVATION_FIELD_NUMBER: builtins.int + TESTEXTENSION_FIELD_NUMBER: builtins.int + topicObservation: builtins.bool + """Set to true if the peer supports the Topic Observation extension.""" + testExtension: builtins.bool + """Experimental extensions use field numbers > 0x200000. + testExtension: used for cross-implementation interop testing (go-libp2p compat). + """ + def __init__( + self, + *, + topicObservation: builtins.bool | None = ..., + testExtension: builtins.bool | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["testExtension", b"testExtension", "topicObservation", b"topicObservation"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["testExtension", b"testExtension", "topicObservation", b"topicObservation"]) -> None: ... + +global___ControlExtensions = ControlExtensions + @typing.final class ControlIHave(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -220,6 +282,62 @@ class ControlIDontWant(google.protobuf.message.Message): global___ControlIDontWant = ControlIDontWant +@typing.final +class ControlObserve(google.protobuf.message.Message): + """ControlObserve: Topic Observation extension. + Sent by an observer to start receiving IHAVE notifications for a topic + without being a full subscriber. (GossipSub v1.3 Topic Observation extension) + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TOPICID_FIELD_NUMBER: builtins.int + topicID: builtins.str + def __init__( + self, + *, + topicID: builtins.str | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["topicID", b"topicID"]) -> None: ... 
+ +global___ControlObserve = ControlObserve + +@typing.final +class ControlUnobserve(google.protobuf.message.Message): + """ControlUnobserve: Topic Observation extension. + Sent by an observer to stop receiving IHAVE notifications for a topic. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TOPICID_FIELD_NUMBER: builtins.int + topicID: builtins.str + def __init__( + self, + *, + topicID: builtins.str | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["topicID", b"topicID"]) -> None: ... + +global___ControlUnobserve = ControlUnobserve + +@typing.final +class TestExtension(google.protobuf.message.Message): + """TestExtension: used for interoperability testing of the v1.3 extension + mechanism between implementations (go-libp2p, rust-libp2p, py-libp2p). + An empty message — its presence on the wire is the signal. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___TestExtension = TestExtension + @typing.final class PeerInfo(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index a573e1d63..0682df185 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -11,11 +11,20 @@ import hashlib import logging import time +import typing from typing import ( + TYPE_CHECKING, NamedTuple, cast, ) +if TYPE_CHECKING: + from libp2p.pubsub.extensions import ExtensionsState as _ExtensionsState + + class _RouterWithExtensions(typing.Protocol): + extensions_state: _ExtensionsState + + import base58 import trio @@ -555,15 +564,35 @@ async def _handle_new_peer(self, peer_id: ID) -> None: logger.debug("fail to add new peer %s, error %s", peer_id, error) return - # Send hello packet + # Build hello packet. 
hello = self.get_hello_packet() + + # GossipSub v1.3 – Extensions Control Message injection. + # Per spec: "If a peer supports any extension, the Extensions control + # message MUST be included in the first message on the stream." + # We ask the router (if it is a v1.3-capable GossipSub router) to + # attach ControlExtensions to the hello packet before we serialise it. + # This is done via duck-typing so pubsub.py stays decoupled from + # gossipsub.py (matching the existing architecture). + negotiated_protocol = stream.get_protocol() + router = self.router + if hasattr(router, "extensions_state") and hasattr( + router, "supports_v13_features" + ): + # We pass the peer_id because extensions_state needs to track + # "sent_extensions" per peer for the at-most-once rule. + # cast() tells static type-checkers the narrowed type without + # creating a runtime dependency on gossipsub.py from pubsub.py. + v13_router = cast("_RouterWithExtensions", router) + hello = v13_router.extensions_state.build_hello_extensions(peer_id, hello) + try: await stream.write(encode_varint_prefixed(hello.SerializeToString())) except StreamClosed: logger.debug("Fail to add new peer %s: stream closed", peer_id) return try: - self.router.add_peer(peer_id, stream.get_protocol()) + self.router.add_peer(peer_id, negotiated_protocol) except Exception as error: logger.debug("fail to add new peer %s, error %s", peer_id, error) return From 6b1984b81ba197abf5ba4676bfb168db4a36e4de Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 21 Feb 2026 16:17:16 +0530 Subject: [PATCH 02/15] resolved merge conflict in imports --- libp2p/pubsub/pubsub.py | 52 +++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 0682df185..13e8f02cf 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -2,7 +2,6 @@ annotations, ) -import base64 from collections.abc import ( Callable, KeysView, @@ -11,21 
+10,13 @@ import hashlib import logging import time -import typing from typing import ( - TYPE_CHECKING, NamedTuple, + Protocol, cast, ) -if TYPE_CHECKING: - from libp2p.pubsub.extensions import ExtensionsState as _ExtensionsState - - class _RouterWithExtensions(typing.Protocol): - extensions_state: _ExtensionsState - - -import base58 +import multibase import trio from libp2p.abc import ( @@ -44,6 +35,7 @@ class _RouterWithExtensions(typing.Protocol): TProtocol, ValidatorFn, ) +from libp2p.encoding_config import get_default_encoding from libp2p.exceptions import ( ParseError, ValidationError, @@ -67,6 +59,9 @@ class _RouterWithExtensions(typing.Protocol): PeerDataError, ) from libp2p.peer.peerstore import env_to_send_in_RPC +from libp2p.pubsub.extensions import ( + ExtensionsState, +) from libp2p.pubsub.utils import maybe_consume_signed_record from libp2p.tools.async_service import ( Service, @@ -94,10 +89,17 @@ class _RouterWithExtensions(typing.Protocol): signature_validator, ) + +class _RouterWithExtensions(Protocol): + """Protocol for a router that supports GossipSub v1.3 extensions.""" + + extensions_state: ExtensionsState + + # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/40e1c94708658b155f30cf99e4574f384756d83c/topic.go#L97 # noqa: E501 SUBSCRIPTION_CHANNEL_SIZE = 32 -logger = logging.getLogger("libp2p.pubsub") +logger = logging.getLogger(__name__) def get_peer_and_seqno_msg_id(msg: rpc_pb2.Message) -> bytes: @@ -105,8 +107,21 @@ def get_peer_and_seqno_msg_id(msg: rpc_pb2.Message) -> bytes: return msg.seqno + msg.from_id -def get_content_addressed_msg_id(msg: rpc_pb2.Message) -> bytes: - return base64.b64encode(hashlib.sha256(msg.data).digest()) +def get_content_addressed_msg_id( + msg: rpc_pb2.Message, encoding: str | None = None +) -> bytes: + """ + Generate content-addressed message ID using multibase encoding. + + :param msg: Pubsub message + :param encoding: Encoding to use. 
When *None* the process-wide default + from :mod:`libp2p.encoding_config` is used. + :return: Multibase-encoded message ID + """ + if encoding is None: + encoding = get_default_encoding() + digest = hashlib.sha256(msg.data).digest() + return multibase.encode(encoding, digest) class TopicValidator(NamedTuple): @@ -583,7 +598,7 @@ async def _handle_new_peer(self, peer_id: ID) -> None: # "sent_extensions" per peer for the at-most-once rule. # cast() tells static type-checkers the narrowed type without # creating a runtime dependency on gossipsub.py from pubsub.py. - v13_router = cast("_RouterWithExtensions", router) + v13_router = cast(_RouterWithExtensions, router) hello = v13_router.extensions_state.build_hello_extensions(peer_id, hello) try: @@ -999,7 +1014,7 @@ async def push_msg(self, msg_forwarder: ID, msg: rpc_pb2.Message) -> None: msg_forwarder, msg.data.hex(), msg.topicIDs, - base58.b58encode(msg.from_id).decode(), + ID(msg.from_id).to_base58(), msg.seqno.hex(), ) return @@ -1017,10 +1032,7 @@ async def push_msg(self, msg_forwarder: ID, msg: rpc_pb2.Message) -> None: # reject messages claiming to be from ourselves but not locally published self_id = self.host.get_id() - if ( - base58.b58encode(msg.from_id).decode() == self_id - and msg_forwarder != self_id - ): + if ID(msg.from_id) == self_id and msg_forwarder != self_id: logger.debug( "dropping message claiming to be from self but forwarded from %s", msg_forwarder, From 0da32a1986bce668d53929bbeaefdb14f2aa17d8 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 21 Feb 2026 17:14:36 +0530 Subject: [PATCH 03/15] fixed pre commit hook and removed unused imports --- libp2p/pubsub/pubsub.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 671fa8bfa..13e8f02cf 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -118,8 +118,6 @@ def get_content_addressed_msg_id( from :mod:`libp2p.encoding_config` is used. 
:return: Multibase-encoded message ID """ - from libp2p.encoding_config import get_default_encoding - if encoding is None: encoding = get_default_encoding() digest = hashlib.sha256(msg.data).digest() From 8af34e47ac89e23f2e97abe28771b7466e3c6e65 Mon Sep 17 00:00:00 2001 From: IronJam11 Date: Sun, 22 Feb 2026 16:41:16 +0530 Subject: [PATCH 04/15] feat: add start and stop observing topics with topic observation state function completions --- libp2p/pubsub/extensions.py | 23 ++-------------- libp2p/pubsub/gossipsub.py | 53 +++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 49 deletions(-) diff --git a/libp2p/pubsub/extensions.py b/libp2p/pubsub/extensions.py index f2f2aa96f..abb397a29 100644 --- a/libp2p/pubsub/extensions.py +++ b/libp2p/pubsub/extensions.py @@ -475,38 +475,19 @@ def remove_peer(self, peer_id: ID) -> None: if not self._observing[topic]: del self._observing[topic] - # ------------------------------------------------------------------ - # TODO for contributor: - # Implement the following methods to complete the outbound observer path. - # ------------------------------------------------------------------ - def get_observing_topics(self) -> set[str]: """ Return the set of topics this node is currently observing (outbound). - Implementation hint: - Return ``set(self._observing.keys())``. - :return: set of topic strings we sent OBSERVE for. """ - # TODO: Return the set of topics from self._observing. - raise NotImplementedError( - "get_observing_topics() is left as an easy task for contributors. " - "Hint: return set(self._observing.keys())" - ) + return set(self._observing.keys()) def get_subscriber_peers_for_topic(self, topic: str) -> set[ID]: """ Return the set of subscriber peers we sent OBSERVE to for *topic*. - Implementation hint: - Return a copy of ``self._observing.get(topic, set())``. - :param topic: the topic to query. :return: set of subscriber peer IDs we are observing through. 
""" - # TODO: Return a copy of self._observing.get(topic, set()). - raise NotImplementedError( - "get_subscriber_peers_for_topic() is left as an easy task for " - "contributors. Hint: return set(self._observing.get(topic, set()))" - ) + return set(self._observing.get(topic, set())) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index e60404a99..b4763fc8e 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -1683,44 +1683,41 @@ async def start_observing_topic(self, topic: str) -> None: Internally it picks suitable subscriber peers and calls :meth:`emit_observe` for each of them. - Implementation hints: - 1. Check ``self.pubsub`` is not None. - 2. Get the peers subscribed to *topic* from - ``self.pubsub.peer_topics.get(topic, set())``. - 3. For each peer: only send OBSERVE if - ``self.supports_v13_features(peer)`` is True AND - ``self.extensions_state.both_support_topic_observation(peer)`` - is True. - 4. Await ``self.emit_observe(topic, peer)`` for each qualifying peer. - :param topic: The topic to start observing. """ - # TODO (contributor): implement start_observing_topic. - # See implementation hints in the docstring above. - raise NotImplementedError( - "start_observing_topic() is left as an easy task for contributors. " - "See the docstring for step-by-step implementation hints." - ) + if self.pubsub is None: + raise NoPubsubAttached + + peers_subscribed = self.pubsub.peer_topics.get(topic, set()) + for peer in peers_subscribed: + if self.supports_v13_features( + peer + ) and self.extensions_state.both_support_topic_observation(peer): + await self.emit_observe(topic, peer) + logger.debug( + "Started observing topic '%s' via peer %s.", + topic, + peer, + ) async def stop_observing_topic(self, topic: str) -> None: """ Stop observing *topic* by sending UNOBSERVE to all peers we previously sent OBSERVE to for *topic*. - Implementation hints: - 1. 
Get the set of peers we sent OBSERVE to via - ``self.topic_observation.get_subscriber_peers_for_topic(topic)``. - (Note: this calls the TODO method in TopicObservationState.) - 2. Await ``self.emit_unobserve(topic, peer)`` for each of them. - :param topic: The topic to stop observing. """ - # TODO (contributor): implement stop_observing_topic. - # See implementation hints in the docstring above. - raise NotImplementedError( - "stop_observing_topic() is left as an easy task for contributors. " - "See the docstring for step-by-step implementation hints." - ) + if self.pubsub is None: + raise NoPubsubAttached + + subscriber_peers = self.topic_observation.get_subscriber_peers_for_topic(topic) + for peer in subscriber_peers: + await self.emit_unobserve(topic, peer) + logger.debug( + "Stopped observing topic '%s' via peer %s.", + topic, + peer, + ) def _track_peer_ip(self, peer_id: ID) -> None: """ From b30aad336039b6ae90b058400cb88a551b19856a Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Tue, 3 Mar 2026 02:28:48 +0530 Subject: [PATCH 05/15] added test suite for gossipsub 1.3 extended --- .../pubsub/test_gossipsub_v1_3_extensions.py | 429 ++++++++++++++++++ 1 file changed, 429 insertions(+) create mode 100644 tests/core/pubsub/test_gossipsub_v1_3_extensions.py diff --git a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py new file mode 100644 index 000000000..0483d7926 --- /dev/null +++ b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py @@ -0,0 +1,429 @@ +import pytest +from unittest.mock import AsyncMock + +import trio + +from libp2p.peer.id import ID +from libp2p.pubsub.extensions import ( + ExtensionsState, + PeerExtensions, + TopicObservationState, +) +from libp2p.pubsub.gossipsub import ( + PROTOCOL_ID_V13, + GossipSub, +) +from libp2p.pubsub.pb import rpc_pb2 +from libp2p.pubsub.score import ScoreParams +from tests.utils.factories import ( + GossipsubFactory, + IDFactory, +) + + +# 
--------------------------------------------------------------------------- +# PeerExtensions tests +# --------------------------------------------------------------------------- + + +def test_peer_extensions_encode_decode_roundtrip() -> None: + """PeerExtensions should roundtrip via ControlExtensions.""" + ext = PeerExtensions(topic_observation=True, test_extension=True) + wire = ext.to_control_extensions() + + decoded = PeerExtensions.from_control_extensions(wire) + + assert decoded.topic_observation is True + assert decoded.test_extension is True + + +def test_peer_extensions_has_any() -> None: + """has_any() reflects whether at least one feature is enabled.""" + ext = PeerExtensions() + assert not ext.has_any() + + ext.topic_observation = True + assert ext.has_any() + + ext.topic_observation = False + ext.test_extension = True + assert ext.has_any() + + +# --------------------------------------------------------------------------- +# ExtensionsState tests +# --------------------------------------------------------------------------- + + +def _make_rpc_with_extensions( + *, topic_observation: bool = False, test_extension: bool = False +) -> rpc_pb2.RPC: + rpc = rpc_pb2.RPC() + control = rpc_pb2.ControlMessage() + wire = PeerExtensions( + topic_observation=topic_observation, + test_extension=test_extension, + ).to_control_extensions() + control.extensions.CopyFrom(wire) + rpc.control.CopyFrom(control) + return rpc + + +def test_build_hello_extensions_attaches_control_extensions() -> None: + """build_hello_extensions should attach ControlExtensions and mark peer as sent.""" + peer_id = IDFactory() + hello = rpc_pb2.RPC() + + state = ExtensionsState( + my_extensions=PeerExtensions(topic_observation=True, test_extension=True) + ) + + mutated = state.build_hello_extensions(peer_id, hello) + + assert mutated is hello + assert mutated.HasField("control") + assert mutated.control.HasField("extensions") + + ext = mutated.control.extensions + assert ext.topicObservation 
is True + assert ext.testExtension is True + assert state.sent_extensions_to(peer_id) is True + + +def test_build_hello_extensions_marks_sent_even_without_features() -> None: + """Even with no local extensions, sent_extensions should be tracked.""" + peer_id = IDFactory() + hello = rpc_pb2.RPC() + + state = ExtensionsState(my_extensions=PeerExtensions()) + + mutated = state.build_hello_extensions(peer_id, hello) + + assert mutated is hello + # No control.extensions should be present when we advertise nothing. + assert not mutated.HasField("control") + assert state.sent_extensions_to(peer_id) is True + + +def test_handle_rpc_records_peer_extensions_on_first_message() -> None: + """First RPC with extensions should record peer's advertised extensions.""" + peer_id = IDFactory() + state = ExtensionsState(my_extensions=PeerExtensions()) + + rpc = _make_rpc_with_extensions(topic_observation=True, test_extension=False) + + state.handle_rpc(rpc, peer_id) + + peer_ext = state.get_peer_extensions(peer_id) + assert isinstance(peer_ext, PeerExtensions) + assert peer_ext.topic_observation is True + assert peer_ext.test_extension is False + assert state.peer_supports_topic_observation(peer_id) is True + assert state.peer_supports_test_extension(peer_id) is False + + +def test_handle_rpc_duplicate_extensions_calls_misbehaviour_callback() -> None: + """Second RPC carrying extensions should trigger misbehaviour callback.""" + peer_id = IDFactory() + state = ExtensionsState(my_extensions=PeerExtensions()) + + calls: list[ID] = [] + + def report_misbehaviour(p: ID) -> None: + calls.append(p) + + state.set_report_misbehaviour(report_misbehaviour) + + rpc = _make_rpc_with_extensions(topic_observation=True) + + # First RPC: records extensions. + state.handle_rpc(rpc, peer_id) + assert calls == [] + + # Second RPC: duplicate extensions -> misbehaviour. 
+ state.handle_rpc(rpc, peer_id) + assert calls == [peer_id] + + +def test_extensions_state_remove_peer_clears_state() -> None: + """remove_peer should clear both sent and received extension state.""" + peer_id = IDFactory() + state = ExtensionsState(my_extensions=PeerExtensions(topic_observation=True)) + rpc = _make_rpc_with_extensions(topic_observation=True) + + state.build_hello_extensions(peer_id, rpc_pb2.RPC()) + state.handle_rpc(rpc, peer_id) + + assert state.sent_extensions_to(peer_id) + assert state.get_peer_extensions(peer_id) is not None + + state.remove_peer(peer_id) + + assert not state.sent_extensions_to(peer_id) + assert state.get_peer_extensions(peer_id) is None + + +def test_both_support_topic_observation_query() -> None: + """both_support_topic_observation returns True only when both sides advertise it.""" + peer_id = IDFactory() + state = ExtensionsState(my_extensions=PeerExtensions(topic_observation=True)) + + # Peer without topicObservation support. + rpc_no = _make_rpc_with_extensions(topic_observation=False) + state.handle_rpc(rpc_no, peer_id) + assert not state.both_support_topic_observation(peer_id) + + # Overwrite with a peer that does support topicObservation. 
+ rpc_yes = _make_rpc_with_extensions(topic_observation=True) + state.handle_rpc(rpc_yes, peer_id) + assert state.both_support_topic_observation(peer_id) + + +def test_gossipsub_report_extensions_misbehaviour_penalizes_behavior() -> None: + """GossipSub._report_extensions_misbehaviour must call scorer.penalize_behavior.""" + score_params = ScoreParams( + p5_behavior_penalty_weight=2.0, + p5_behavior_penalty_threshold=0.0, + p5_behavior_penalty_decay=1.0, + ) + router = GossipsubFactory(score_params=score_params) + assert isinstance(router, GossipSub) + assert router.scorer is not None + + peer_id = IDFactory() + + penalize_calls: list[tuple[ID, float]] = [] + original_penalize = router.scorer.penalize_behavior + + def wrapped_penalize(p: ID, amount: float = 1.0) -> None: + penalize_calls.append((p, amount)) + original_penalize(p, amount) + + router.scorer.penalize_behavior = wrapped_penalize # type: ignore[assignment] + + router._report_extensions_misbehaviour(peer_id) + + assert penalize_calls == [(peer_id, 1.0)] + + +# --------------------------------------------------------------------------- +# TopicObservationState tests +# --------------------------------------------------------------------------- + + +def test_topic_observation_state_observing_and_observers() -> None: + """TopicObservationState should track observing topics and observers correctly.""" + state = TopicObservationState() + topic = "test-topic" + observer = IDFactory() + subscriber = IDFactory() + + # Outbound observing. + assert not state.is_observing(topic) + state.add_observing(topic, subscriber) + assert state.is_observing(topic) + assert state.get_subscriber_peers_for_topic(topic) == {subscriber} + + state.remove_observing(topic, subscriber) + assert not state.is_observing(topic) + assert state.get_subscriber_peers_for_topic(topic) == set() + + # Inbound observers. 
+ assert state.get_observers(topic) == set() + state.add_observer(topic, observer) + assert state.get_observers(topic) == {observer} + + state.remove_observer(topic, observer) + assert state.get_observers(topic) == set() + + +def test_topic_observation_state_remove_peer_clears_all_state() -> None: + """remove_peer should drop a peer from both observing and observer maps.""" + state = TopicObservationState() + topic1 = "topic-1" + topic2 = "topic-2" + peer = IDFactory() + + state.add_observing(topic1, peer) + state.add_observer(topic2, peer) + + state.remove_peer(peer) + + assert state.get_observing_topics() == set() + assert state.get_observers(topic1) == set() + assert state.get_observers(topic2) == set() + + +# --------------------------------------------------------------------------- +# GossipSub v1.3 wiring tests (topic observation + notify observers) +# --------------------------------------------------------------------------- + + +def test_supports_v13_features_based_on_protocol() -> None: + """supports_v13_features should be true only for v1.3+ peers.""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + v13_peer = IDFactory() + v12_peer = IDFactory() + + router.add_peer(v13_peer, PROTOCOL_ID_V13) + # Reuse PROTOCOL_ID_V13 constant to ensure we don't regress the set of + # supported protocols in _get_in_topic_gossipsub_peers_from_minus. + from libp2p.pubsub.gossipsub import PROTOCOL_ID_V12 + + router.add_peer(v12_peer, PROTOCOL_ID_V12) + + assert router.supports_v13_features(v13_peer) is True + assert router.supports_v13_features(v12_peer) is False + + +@pytest.mark.trio +async def test_handle_observe_and_unobserve_manage_observers() -> None: + """handle_observe / handle_unobserve should add and remove observers.""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + topic = "obs-topic" + observer_peer = IDFactory() + + # Simulate that the peer advertised topicObservation support via extensions. 
+ router.extensions_state._peer_extensions[observer_peer] = PeerExtensions( + topic_observation=True + ) + + observe_msg = rpc_pb2.ControlObserve(topicID=topic) + await router.handle_observe(observe_msg, observer_peer) + + assert observer_peer in router.topic_observation.get_observers(topic) + + unobserve_msg = rpc_pb2.ControlUnobserve(topicID=topic) + await router.handle_unobserve(unobserve_msg, observer_peer) + + assert observer_peer not in router.topic_observation.get_observers(topic) + + +@pytest.mark.trio +async def test_handle_observe_ignored_when_peer_did_not_advertise_extension() -> None: + """Peers that did not advertise topicObservation must not become observers.""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + topic = "obs-topic" + observer_peer = IDFactory() + + # Peer exists, but its advertised extensions do NOT include topicObservation. + router.extensions_state._peer_extensions[observer_peer] = PeerExtensions( + topic_observation=False + ) + + observe_msg = rpc_pb2.ControlObserve(topicID=topic) + await router.handle_observe(observe_msg, observer_peer) + + assert router.topic_observation.get_observers(topic) == set() + + +@pytest.mark.trio +async def test_emit_observe_and_unobserve_update_observing_state() -> None: + """emit_observe / emit_unobserve should update TopicObservationState (outbound).""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + topic = "obs-topic" + subscriber_peer = IDFactory() + + # Stub pubsub.peers so emit_control_message sees the peer as connected. + class DummyPubsub: + def __init__(self) -> None: + self.peers: dict[ID, object] = {subscriber_peer: object()} + + router.pubsub = DummyPubsub() # type: ignore[assignment] + + # Avoid writing to a real stream; we only care about state updates. 
+ router.emit_control_message = AsyncMock() # type: ignore[assignment] + + assert not router.topic_observation.is_observing(topic) + + await router.emit_observe(topic, subscriber_peer) + assert router.topic_observation.is_observing(topic) + assert router.topic_observation.get_subscriber_peers_for_topic(topic) == { + subscriber_peer + } + + await router.emit_unobserve(topic, subscriber_peer) + assert not router.topic_observation.is_observing(topic) + assert router.topic_observation.get_subscriber_peers_for_topic(topic) == set() + + +@pytest.mark.trio +async def test_notify_observers_sends_ihave_to_each_observer() -> None: + """_notify_observers should call emit_ihave for each observer with the msg_id.""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + topic = "obs-topic" + observer_peer = IDFactory() + msg_id = b"message-id" + + # Configure TopicObservationState with a single observer. + router.topic_observation.add_observer(topic, observer_peer) + + # Stub pubsub with peers map so observers are considered connected. + class DummyPubsub: + def __init__(self) -> None: + self.peers: dict[ID, object] = {observer_peer: object()} + + router.pubsub = DummyPubsub() # type: ignore[assignment] + + # Capture IHAVE emissions. + router.emit_ihave = AsyncMock() # type: ignore[assignment] + + await router._notify_observers([topic], msg_id) + + # emit_ihave(topic, [str(msg_id)], observer_peer) is expected. 
+ router.emit_ihave.assert_awaited_once() + called_topic, called_msg_ids, called_peer = router.emit_ihave.call_args.args + assert called_topic == topic + assert called_peer == observer_peer + assert called_msg_ids == [str(msg_id)] + + +@pytest.mark.trio +async def test_start_and_stop_observing_topic_high_level_api() -> None: + """start_observing_topic / stop_observing_topic should delegate to OBSERVE/UNOBSERVE.""" + router = GossipsubFactory() + assert isinstance(router, GossipSub) + + topic = "obs-topic" + subscriber_peer = IDFactory() + + # Simulate pubsub state: subscriber_peer is subscribed to topic. + class DummyPubsub: + def __init__(self) -> None: + self.peer_topics = {topic: {subscriber_peer}} + + router.pubsub = DummyPubsub() # type: ignore[assignment] + + # Ensure the peer negotiated v1.3 and both sides support topicObservation. + router.peer_protocol[subscriber_peer] = PROTOCOL_ID_V13 + router.extensions_state.my_extensions.topic_observation = True + router.extensions_state._peer_extensions[subscriber_peer] = PeerExtensions( + topic_observation=True + ) + + # Avoid touching real network; just record control messages. + router.emit_control_message = AsyncMock() # type: ignore[assignment] + + assert not router.topic_observation.is_observing(topic) + + await router.start_observing_topic(topic) + # After OBSERVE, we should be tracking the topic as "observing". 
+ assert router.topic_observation.is_observing(topic) + assert router.topic_observation.get_subscriber_peers_for_topic(topic) == { + subscriber_peer + } + + await router.stop_observing_topic(topic) + assert not router.topic_observation.is_observing(topic) + assert router.topic_observation.get_subscriber_peers_for_topic(topic) == set() + From 01b6abf13fa1203b961d1726d003ac370373de89 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 7 Mar 2026 21:55:19 +0530 Subject: [PATCH 06/15] fix: resolve GossipSub v1.3 merge conflicts with main, keep v1.3 proto and Topic Observation, add safe_bytes_from_hex and fix tests --- libp2p/pubsub/gossipsub.py | 725 ++++++++++++++++-- libp2p/pubsub/utils.py | 13 + .../pubsub/test_gossipsub_v1_3_extensions.py | 20 +- 3 files changed, 663 insertions(+), 95 deletions(-) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index b4763fc8e..d06050f29 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -2,6 +2,8 @@ defaultdict, ) from collections.abc import ( + Awaitable, + Callable, Iterable, Sequence, ) @@ -39,6 +41,9 @@ from libp2p.tools.async_service import ( Service, ) +from libp2p.utils.multiaddr_utils import ( + extract_ip_from_multiaddr as extract_ip_from_multiaddr_util, +) from .exceptions import ( NoPubsubAttached, @@ -63,7 +68,7 @@ ) from .utils import ( parse_message_id_safe, - safe_parse_message_id, + safe_bytes_from_hex, ) PROTOCOL_ID = TProtocol("/meshsub/1.0.0") @@ -72,6 +77,7 @@ # GossipSub v1.3: Extensions Control Message # Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md PROTOCOL_ID_V13 = TProtocol("/meshsub/1.3.0") +PROTOCOL_ID_V14 = TProtocol("/meshsub/1.4.0") PROTOCOL_ID_V20 = TProtocol("/meshsub/2.0.0") logger = logging.getLogger(__name__) @@ -134,8 +140,8 @@ class GossipSub(IPubsubRouter, Service): message_rate_limits: dict[ID, dict[str, list[float]]] # peer -> topic -> timestamps max_messages_per_topic_per_second: float 
equivocation_detection: dict[ - tuple[bytes, bytes], rpc_pb2.Message - ] # (seqno, from) -> first_msg + bytes, rpc_pb2.Message + ] # msg_id (from_id + seqno) -> first_msg eclipse_protection_enabled: bool min_mesh_diversity_ips: int # Minimum number of different IPs in mesh @@ -193,6 +199,7 @@ def __init__( # Create heartbeat timer self.heartbeat_initial_delay = heartbeat_initial_delay self.heartbeat_interval = heartbeat_interval + self.heartbeat_interval_base = heartbeat_interval # For adaptive adjustment # Create direct peers self.direct_peers = dict() @@ -239,6 +246,18 @@ def __init__( self.gossip_factor = 0.25 # Default gossip factor self.last_health_update = int(time.time()) + # Enhanced v1.4 adaptive metrics + self.message_delivery_success_rate = 1.0 + self.average_peer_score = 0.0 + self.mesh_stability_score = 1.0 + self.connection_churn_rate = 0.0 + self.last_metrics_update = int(time.time()) + + # Tracking for adaptive calculations + self.recent_message_deliveries: dict[str, list[float]] = defaultdict(list) + self.recent_peer_connections: list[float] = [] + self.recent_peer_disconnections: list[float] = [] + # Security features self.spam_protection_enabled = spam_protection_enabled self.message_rate_limits = defaultdict(lambda: defaultdict(list)) @@ -247,6 +266,28 @@ def __init__( self.eclipse_protection_enabled = eclipse_protection_enabled self.min_mesh_diversity_ips = min_mesh_diversity_ips + # Extensions support (v1.3+) + self.extension_handlers: dict[str, Callable[[bytes, ID], Awaitable[None]]] = {} + + # Rate limiting for v1.4 features + self.iwant_request_limits: dict[ID, dict[str, list[float]]] = defaultdict( + lambda: defaultdict(list) + ) + self.ihave_message_limits: dict[ID, dict[str, list[float]]] = defaultdict( + lambda: defaultdict(list) + ) + self.graft_flood_tracking: dict[ID, dict[str, float]] = defaultdict( + lambda: defaultdict(float) + ) + + # v1.4 rate limiting parameters + self.max_iwant_requests_per_second: float = 10.0 + 
self.max_ihave_messages_per_second: float = 10.0 + self.graft_flood_threshold: float = 10.0 # seconds + + # v1.4 adaptive gossip parameters + self.opportunistic_graft_threshold: float = 0.5 + def supports_scoring(self, peer_id: ID) -> bool: """ Check if peer supports Gossipsub v1.1+ scoring features. @@ -258,6 +299,7 @@ def supports_scoring(self, peer_id: ID) -> bool: PROTOCOL_ID_V11, PROTOCOL_ID_V12, PROTOCOL_ID_V13, + PROTOCOL_ID_V14, PROTOCOL_ID_V20, ) @@ -275,6 +317,7 @@ def supports_v13_features(self, peer_id: ID) -> bool: """ return self.peer_protocol.get(peer_id) in ( PROTOCOL_ID_V13, + PROTOCOL_ID_V14, PROTOCOL_ID_V20, ) @@ -301,6 +344,162 @@ def get_protocols(self) -> list[TProtocol]: """ return self.protocols + def supports_protocol_feature(self, peer_id: ID, feature: str) -> bool: + """ + Check if a peer supports a specific protocol feature based on its + supported protocol versions. + + :param peer_id: ID of the peer to check + :param feature: Feature name to check support for + :return: True if the peer supports the feature, False otherwise + """ + if peer_id not in self.peer_protocol: + return False + + protocol = self.peer_protocol[peer_id] + + # Define feature support by protocol version + if feature == "px": # Peer Exchange + return protocol in ( + PROTOCOL_ID_V11, + PROTOCOL_ID_V12, + PROTOCOL_ID_V13, + PROTOCOL_ID_V14, + ) + elif feature == "idontwant": # IDONTWANT message + return protocol in (PROTOCOL_ID_V12, PROTOCOL_ID_V13, PROTOCOL_ID_V14) + elif feature == "extensions": # Extensions control message + return protocol in (PROTOCOL_ID_V13, PROTOCOL_ID_V14) + elif feature == "adaptive_gossip": # Adaptive gossip parameters + return protocol == PROTOCOL_ID_V14 + elif feature == "scoring": # Peer scoring system + return protocol in ( + PROTOCOL_ID_V11, + PROTOCOL_ID_V12, + PROTOCOL_ID_V13, + PROTOCOL_ID_V14, + ) + elif feature == "extended_scoring": # Extended peer scoring (P5-P7) + return protocol == PROTOCOL_ID_V14 + + # Default to not supported 
for unknown features + return False + + def register_extension_handler( + self, extension_name: str, handler: Callable[[bytes, ID], Awaitable[None]] + ) -> None: + """ + Register a handler for a specific extension. + + :param extension_name: Name of the extension + :param handler: Async callable that takes (data: bytes, sender_peer_id: ID) + """ + self.extension_handlers[extension_name] = handler + logger.debug("Registered handler for extension: %s", extension_name) + + def unregister_extension_handler(self, extension_name: str) -> None: + """ + Unregister a handler for a specific extension. + + :param extension_name: Name of the extension + """ + if extension_name in self.extension_handlers: + del self.extension_handlers[extension_name] + logger.debug("Unregistered handler for extension: %s", extension_name) + + async def emit_extension( + self, extension_name: str, data: bytes, to_peer: ID + ) -> None: + """ + Emit an extension message to a peer. + + In GossipSub v1.3 wire format, the Extensions control message is sent + only once in the first stream message (hello). Arbitrary extension + name/data messages after that are not part of the v1.3 spec. This method + is a no-op for compatibility with the extension_handlers API. + + :param extension_name: Name of the extension + :param data: Extension data + :param to_peer: Target peer ID + """ + logger.debug( + "emit_extension(%s, ...) called: v1.3 wire format only sends Extensions " + "in the first hello; skipping.", + extension_name, + ) + + def _check_iwant_rate_limit(self, peer_id: ID) -> bool: + """ + Check if peer has exceeded IWANT request rate limit. 
+ + :param peer_id: The peer to check + :return: True if within rate limit, False if exceeded + """ + current_time = time.time() + timestamps = self.iwant_request_limits[peer_id]["requests"] + + # Remove old timestamps (older than 1 second) + cutoff_time = current_time - 1.0 + timestamps[:] = [t for t in timestamps if t > cutoff_time] + + # Check if rate limit exceeded + if len(timestamps) >= self.max_iwant_requests_per_second: + # Apply penalty for IWANT spam + if hasattr(self, "scorer") and self.scorer is not None: + self.scorer.penalize_iwant_spam(peer_id, 5.0) + return False + + # Add current timestamp + timestamps.append(current_time) + return True + + def _check_ihave_rate_limit(self, peer_id: ID, topic: str) -> bool: + """ + Check if peer has exceeded IHAVE message rate limit for a topic. + + :param peer_id: The peer to check + :param topic: The topic to check + :return: True if within rate limit, False if exceeded + """ + current_time = time.time() + timestamps = self.ihave_message_limits[peer_id][topic] + + # Remove old timestamps (older than 1 second) + cutoff_time = current_time - 1.0 + timestamps[:] = [t for t in timestamps if t > cutoff_time] + + # Check if rate limit exceeded + if len(timestamps) >= self.max_ihave_messages_per_second: + # Apply penalty for IHAVE spam + if hasattr(self, "scorer") and self.scorer is not None: + self.scorer.penalize_ihave_spam(peer_id, 5.0) + return False + + # Add current timestamp + timestamps.append(current_time) + return True + + def _check_graft_flood_protection(self, peer_id: ID, topic: str) -> bool: + """ + Check for GRAFT flood protection (P7 behavioral penalty). 
+ + :param peer_id: The peer to check + :param topic: The topic to check + :return: True if no flood detected, False if flood detected + """ + current_time = time.time() + last_prune_time = self.graft_flood_tracking[peer_id].get(topic, 0.0) + + # Use the smaller of graft_flood_threshold and prune_back_off so that + # peers can re-graft after the configured backoff period. + threshold = min(self.graft_flood_threshold, float(self.prune_back_off)) + + # Check if GRAFT comes too soon after PRUNE (flood threshold) + if current_time - last_prune_time < threshold: + return False + + return True + def attach(self, pubsub: Pubsub) -> None: """ Attach is invoked by the PubSub constructor to attach the router to a @@ -335,6 +534,7 @@ def add_peer(self, peer_id: ID, protocol_id: TProtocol | None) -> None: PROTOCOL_ID_V11, PROTOCOL_ID_V12, PROTOCOL_ID_V13, + PROTOCOL_ID_V14, PROTOCOL_ID_V20, floodsub.PROTOCOL_ID, ): @@ -353,6 +553,10 @@ def add_peer(self, peer_id: ID, protocol_id: TProtocol | None) -> None: if self.scorer is not None and self.pubsub is not None: self._track_peer_ip(peer_id) + # Track connection for adaptive gossip metrics (only when enabled) + if self.adaptive_gossip_enabled: + self.recent_peer_connections.append(time.time()) + def remove_peer(self, peer_id: ID) -> None: """ Notifies the router that a peer has been disconnected. @@ -384,6 +588,10 @@ def remove_peer(self, peer_id: ID) -> None: # Topic Observation: clean up observer / observing state for this peer self.topic_observation.remove_peer(peer_id) + # Track disconnection for adaptive gossip metrics (only when enabled) + if self.adaptive_gossip_enabled: + self.recent_peer_disconnections.append(time.time()) + async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: """ Invoked to process control messages in the RPC envelope. 
@@ -422,6 +630,7 @@ async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: if control_message.idontwant: for idontwant in control_message.idontwant: await self.handle_idontwant(idontwant, sender_peer_id) + # v1.3 Extensions control message is processed above via extensions_state.handle_rpc() # GossipSub v1.3 – Topic Observation extension if self.supports_v13_features(sender_peer_id): @@ -455,7 +664,7 @@ async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None: msg_id = self.pubsub.get_message_id(pubsub_msg) else: # Fallback to default ID construction - msg_id = pubsub_msg.seqno + pubsub_msg.from_id + msg_id = pubsub_msg.from_id + pubsub_msg.seqno peers_gen = self._get_peers_to_send( pubsub_msg.topicIDs, @@ -497,6 +706,8 @@ async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None: for topic in pubsub_msg.topicIDs: self.time_since_last_publish[topic] = int(time.time()) + # Track message delivery for adaptive metrics + self.recent_message_deliveries[topic].append(time.time()) def _get_peers_to_send( self, @@ -767,8 +978,8 @@ async def heartbeat(self) -> None: self._periodic_security_cleanup() # Perform ongoing mesh quality maintenance (v2.0 feature) - for topic in self.mesh: - self._maintain_mesh_quality(topic) + for topic in list(self.mesh): + await self._maintain_mesh_quality(topic) # Prune old IDONTWANT entries to prevent memory leaks self._prune_idontwant_entries() @@ -917,7 +1128,7 @@ def _handle_topic_heartbeat( peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus( topic, gossip_count, current_peers, True ) - msg_id_strs = [str(msg_id) for msg_id in msg_ids] + msg_id_strs = [msg_id.hex() for msg_id in msg_ids] for peer in peers_to_emit_ihave_to: peers_to_gossip[peer][topic] = msg_id_strs @@ -980,6 +1191,8 @@ def select_from_minus( # If num_to_select > size(selection_pool), then return selection_pool (which has # the most possible elements s.t. 
the number of elements is less than # num_to_select) + if num_to_select <= 0: + return [] if num_to_select >= len(selection_pool): return selection_pool @@ -1006,6 +1219,7 @@ def _get_in_topic_gossipsub_peers_from_minus( PROTOCOL_ID_V11, PROTOCOL_ID_V12, PROTOCOL_ID_V13, + PROTOCOL_ID_V14, PROTOCOL_ID_V20, ) } @@ -1144,23 +1358,37 @@ async def _do_px(self, px_peers: list[rpc_pb2.PeerInfo]) -> None: async def handle_ihave( self, ihave_msg: rpc_pb2.ControlIHave, sender_peer_id: ID ) -> None: - """Checks the seen set and requests unknown messages with an IWANT message.""" + """ + Checks the seen set and requests unknown messages with an IWANT message. + + Enhanced with rate limiting for GossipSub v1.4. + """ + # Rate limiting check for IHAVE messages + if not self._check_ihave_rate_limit(sender_peer_id, ihave_msg.topicID): + logger.warning( + "IHAVE rate limit exceeded for peer %s on topic %s, ignoring message", + sender_peer_id, + ihave_msg.topicID, + ) + return if self.pubsub is None: raise NoPubsubAttached - # Get list of all seen (seqnos, from) from the (seqno, from) tuples in - # seen_messages cache - seen_seqnos_and_peers = [ - str(seqno_and_from) - for seqno_and_from in self.pubsub.seen_messages.cache.keys() - ] - - # Add all unknown message ids (ids that appear in ihave_msg but not in - # seen_seqnos) to list of messages we want to request - msg_ids_wanted: list[MessageID] = [ - parse_message_id_safe(msg_id) - for msg_id in ihave_msg.messageIDs - if msg_id not in seen_seqnos_and_peers - ] + pubsub = self.pubsub + + # Add all unknown message ids (ids that appear in ihave_msg but not + # already seen) to list of messages we want to request + msg_ids_wanted: list[MessageID] = [] + for msg_id in ihave_msg.messageIDs: + mid_bytes = safe_bytes_from_hex(msg_id) + if mid_bytes is None: + logger.warning( + "Received invalid hex message ID in IHAVE from %s: %r", + sender_peer_id, + msg_id, + ) + continue + if not pubsub.seen_messages.has(mid_bytes): + 
msg_ids_wanted.append(parse_message_id_safe(msg_id)) # Request messages with IWANT message if msg_ids_wanted: @@ -1172,10 +1400,28 @@ async def handle_iwant( """ Forwards all request messages that are present in mcache to the requesting peer. + + Enhanced with rate limiting for GossipSub v1.4. """ - msg_ids: list[tuple[bytes, bytes]] = [ - safe_parse_message_id(msg) for msg in iwant_msg.messageIDs - ] + # Rate limiting check for IWANT messages + if not self._check_iwant_rate_limit(sender_peer_id): + logger.warning( + "IWANT rate limit exceeded for peer %s, ignoring request", + sender_peer_id, + ) + return + + msg_ids: list[bytes] = [] + for msg_id_str in iwant_msg.messageIDs: + mid_bytes = safe_bytes_from_hex(msg_id_str) + if mid_bytes is None: + logger.warning( + "Received invalid hex message ID in IWANT from %s: %r", + sender_peer_id, + msg_id_str, + ) + continue + msg_ids.append(mid_bytes) msgs_to_forward: list[rpc_pb2.Message] = [] for msg_id_iwant in msg_ids: # Check if the wanted message ID is present in mcache @@ -1222,6 +1468,18 @@ async def handle_graft( ) -> None: topic: str = graft_msg.topicID + # GRAFT flood protection (v1.4 feature) + if not self._check_graft_flood_protection(sender_peer_id, topic): + logger.warning( + "GRAFT flood detected from peer %s for topic %s, applying penalty", + sender_peer_id, + topic, + ) + if self.scorer is not None: + self.scorer.penalize_graft_flood(sender_peer_id, 10.0) + await self.emit_prune(topic, sender_peer_id, False, False) + return + # Score gate for GRAFT acceptance if self.scorer is not None: if self.scorer.is_graylisted(sender_peer_id, [topic]): @@ -1278,6 +1536,9 @@ async def handle_prune( if self.scorer is not None: self.scorer.on_leave_mesh(sender_peer_id, topic) + # Track PRUNE time for GRAFT flood protection + self.graft_flood_tracking[sender_peer_id][topic] = time.time() + if px_peers: # Score-gate PX acceptance allow_px = True @@ -1295,6 +1556,7 @@ def pack_control_msgs( prune_msgs: 
list[rpc_pb2.ControlPrune] | None, idontwant_msgs: list[rpc_pb2.ControlIDontWant] | None = None, ) -> rpc_pb2.ControlMessage: + """Pack control messages. v1.3 Extensions are set only in the first hello.""" control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage() if ihave_msgs: control_msg.ihave.extend(ihave_msgs) @@ -1433,7 +1695,8 @@ async def _emit_idontwant_for_message( v12_plus_peers = { peer_id for peer_id in mesh_peers - if self.peer_protocol.get(peer_id) in (PROTOCOL_ID_V12, PROTOCOL_ID_V20) + if self.peer_protocol.get(peer_id) + in (PROTOCOL_ID_V12, PROTOCOL_ID_V13, PROTOCOL_ID_V14, PROTOCOL_ID_V20) } if not v12_plus_peers: @@ -1739,23 +2002,44 @@ def _track_peer_ip(self, peer_id: ID) -> None: if conn is not None and hasattr(conn, "remote_addr"): remote_addr = getattr(conn, "remote_addr", None) if remote_addr: - # Extract IP from multiaddr - ip_str = self._extract_ip_from_multiaddr(str(remote_addr)) + ip_str = self._extract_ip_from_remote_addr(remote_addr) if ip_str: self.scorer.add_peer_ip(peer_id, ip_str) except Exception as e: logger.debug("Failed to track IP for peer %s: %s", peer_id, e) - def _extract_ip_from_multiaddr(self, multiaddr_str: str) -> str | None: + def _extract_ip_from_remote_addr(self, remote_addr: Any) -> str | None: + """ + Extract IP address from a remote address (Multiaddr, (host, port), or string). + + Uses libp2p.utils.multiaddr_utils when possible for consistency. 
+ """ + try: + from multiaddr import Multiaddr + + if isinstance(remote_addr, Multiaddr): + return extract_ip_from_multiaddr_util(remote_addr) + if isinstance(remote_addr, (tuple, list)) and len(remote_addr) >= 1: + return str(remote_addr[0]) + addr_str = str(remote_addr) + # Try parsing as multiaddr and use shared util + try: + maddr = Multiaddr(addr_str) + return extract_ip_from_multiaddr_util(maddr) + except Exception: + pass + return self._extract_ip_from_multiaddr_str(addr_str) + except Exception: + return None + + def _extract_ip_from_multiaddr_str(self, multiaddr_str: str) -> str | None: """ - Extract IP address from a multiaddr string. + Extract IP from multiaddr string (fallback when Multiaddr parse fails). :param multiaddr_str: The multiaddr string :return: The IP address or None if extraction fails """ try: - # Simple extraction for common cases like /ip4/127.0.0.1/tcp/4001 - # or /ip6/::1/tcp/4001 parts = multiaddr_str.split("/") for i, part in enumerate(parts): if part in ("ip4", "ip6") and i + 1 < len(parts): @@ -1805,18 +2089,31 @@ def _update_network_health(self) -> None: connectivity_health /= total_topics - # Calculate peer score health (0.0 to 1.0) + # Calculate additional health metrics score_health = self._calculate_peer_score_health() - - # Combine metrics (weighted average) - self.network_health_score = 0.6 * connectivity_health + 0.4 * score_health + delivery_health = self._calculate_message_delivery_health() + stability_health = self._calculate_mesh_stability_health() + churn_health = self._calculate_connection_churn_health() + + # Combine metrics (weighted average with v1.4 enhancements) + self.network_health_score = ( + 0.3 * connectivity_health + + 0.25 * score_health + + 0.2 * delivery_health + + 0.15 * stability_health + + 0.1 * churn_health + ) self.network_health_score = max(0.0, min(1.0, self.network_health_score)) logger.debug( - "Network health updated: %.2f (connectivity: %.2f, scores: %.2f)", + "Network health updated: %.2f 
(connectivity: %.2f, scores: %.2f, ", + "delivery: %.2f, stability: %.2f, churn: %.2f)", self.network_health_score, connectivity_health, score_health, + delivery_health, + stability_health, + churn_health, ) except Exception as e: @@ -1841,7 +2138,7 @@ def _calculate_peer_score_health(self) -> float: all_mesh_peers.update(peers) if not all_mesh_peers: - return 1.0 + return 0.0 # No peers means poor health # Calculate average score total_score = 0.0 @@ -1860,37 +2157,177 @@ def _calculate_peer_score_health(self) -> float: except Exception: return 0.5 + def _calculate_message_delivery_health(self) -> float: + """ + Calculate health based on message delivery success rate. + + :return: Health score from 0.0 to 1.0 + """ + try: + current_time = time.time() + cutoff_time = current_time - 60.0 # Look at last minute + + total_deliveries = 0 + successful_deliveries = 0 + + for topic, delivery_times in self.recent_message_deliveries.items(): + # Clean old entries + delivery_times[:] = [t for t in delivery_times if t > cutoff_time] + + # Count deliveries (simplified - in real implementation, + # track success/failure separately) + total_deliveries += len(delivery_times) + successful_deliveries += len( + delivery_times + ) # Assume all tracked are successful + + if total_deliveries == 0: + # If we have no deliveries but have mesh peers, assume moderate health + # If we have no mesh peers, return poor health + total_mesh_peers = sum(len(peers) for peers in self.mesh.values()) + if total_mesh_peers == 0: + return 0.0 # No mesh peers means poor health + return 0.5 # No delivery data but have peers, assume moderate + + self.message_delivery_success_rate = ( + successful_deliveries / total_deliveries + ) + return self.message_delivery_success_rate + + except Exception: + return 0.5 + + def _calculate_mesh_stability_health(self) -> float: + """ + Calculate health based on mesh stability (low churn is good). 
+ + :return: Health score from 0.0 to 1.0 + """ + try: + # Simple stability metric: ratio of stable connections + total_mesh_peers = sum(len(peers) for peers in self.mesh.values()) + + if total_mesh_peers == 0: + return 0.0 # No mesh peers means poor stability + + # In a real implementation, track mesh changes over time + # For now, use a simple heuristic based on mesh size vs target + stability_ratio = 0.0 + topic_count = len(self.mesh) + + if topic_count > 0: + for topic, peers in self.mesh.items(): + target_size = self.degree + actual_size = len(peers) + + if actual_size == 0: + topic_stability = 0.0 + else: + # Stability is higher when actual size is close to target + size_ratio = min( + actual_size / target_size, target_size / actual_size + ) + topic_stability = size_ratio + + stability_ratio += topic_stability + + self.mesh_stability_score = stability_ratio / topic_count + else: + self.mesh_stability_score = 1.0 + + return self.mesh_stability_score + + except Exception: + return 0.5 + + def _calculate_connection_churn_health(self) -> float: + """ + Calculate health based on connection churn rate (low churn is good). 
+ + :return: Health score from 0.0 to 1.0 + """ + try: + current_time = time.time() + window_size = 60.0 # 1 minute window + cutoff_time = current_time - window_size + + # Clean old entries + self.recent_peer_connections[:] = [ + t for t in self.recent_peer_connections if t > cutoff_time + ] + self.recent_peer_disconnections[:] = [ + t for t in self.recent_peer_disconnections if t > cutoff_time + ] + + connections = len(self.recent_peer_connections) + disconnections = len(self.recent_peer_disconnections) + total_churn = connections + disconnections + + # Calculate churn rate (events per minute) + churn_rate = total_churn / (window_size / 60.0) + self.connection_churn_rate = churn_rate + + # Health decreases with higher churn rate + # Assume 10 events/minute is high churn, 0 is perfect + max_acceptable_churn = 10.0 + health = max(0.0, 1.0 - (churn_rate / max_acceptable_churn)) + + return health + + except Exception: + return 0.5 + def _adapt_gossip_parameters(self) -> None: """ Adapt gossip parameters based on network health. + Enhanced v1.4 version with more sophisticated parameter adjustment. 
+ When network health is poor: - Increase mesh degree bounds to improve connectivity - Increase gossip factor to spread messages more widely + - Adjust heartbeat intervals for faster convergence When network health is good: - Use standard parameters for efficiency + - Optimize for lower bandwidth usage """ if not self.adaptive_gossip_enabled: return - # Adapt degree bounds based on health - if self.network_health_score < 0.3: - # Poor health: increase connectivity + health = self.network_health_score + + # More granular health-based adjustments + if health < 0.2: + # Critical health: aggressive adaptation + self.adaptive_degree_low = min(self.degree_low + 3, self.degree_high + 2) + self.adaptive_degree_high = self.degree_high + 4 + self.gossip_factor = min(0.6, 0.25 * 2.0) + elif health < 0.4: + # Poor health: significant adaptation self.adaptive_degree_low = min(self.degree_low + 2, self.degree_high) self.adaptive_degree_high = self.degree_high + 3 - self.gossip_factor = min(0.5, self.gossip_factor * 1.5) - elif self.network_health_score < 0.7: - # Moderate health: slight increase + self.gossip_factor = min(0.5, 0.25 * 1.8) + elif health < 0.6: + # Moderate health: moderate adaptation self.adaptive_degree_low = min(self.degree_low + 1, self.degree_high) + self.adaptive_degree_high = self.degree_high + 2 + self.gossip_factor = min(0.4, 0.25 * 1.4) + elif health < 0.8: + # Good health: slight optimization + self.adaptive_degree_low = self.degree_low self.adaptive_degree_high = self.degree_high + 1 - self.gossip_factor = min(0.4, self.gossip_factor * 1.2) + self.gossip_factor = 0.25 * 1.1 else: - # Good health: use standard parameters + # Excellent health: use base parameters (no further reduction) self.adaptive_degree_low = self.degree_low self.adaptive_degree_high = self.degree_high self.gossip_factor = 0.25 + # Additional v1.4 adaptive features + self._adapt_opportunistic_grafting_parameters(health) + self._adapt_heartbeat_parameters(health) + def 
_get_adaptive_gossip_peers_count(self, topic: str, total_peers: int) -> int: """ Calculate adaptive number of peers to gossip to. @@ -1909,6 +2346,43 @@ def _get_adaptive_gossip_peers_count(self, topic: str, total_peers: int) -> int: return max(min_count, base_count) + def _adapt_opportunistic_grafting_parameters(self, health: float) -> None: + """ + Adapt opportunistic grafting behavior based on network health. + + :param health: Current network health score (0.0 to 1.0) + """ + # In poor health, be more aggressive about opportunistic grafting + if hasattr(self, "opportunistic_graft_threshold"): + if health < 0.4: + # Lower threshold = more aggressive grafting + self.opportunistic_graft_threshold = 0.3 + elif health < 0.7: + self.opportunistic_graft_threshold = 0.5 + else: + # Higher threshold = more selective grafting + self.opportunistic_graft_threshold = 0.7 + + def _adapt_heartbeat_parameters(self, health: float) -> None: + """ + Adapt heartbeat-related parameters based on network health. + + Poor health: more frequent heartbeats for faster convergence. + Good health: standard interval to save bandwidth. + + :param health: Current network health score (0.0 to 1.0) + """ + base = getattr(self, "heartbeat_interval_base", self.heartbeat_interval) + if health < 0.4: + # Critical: heartbeat every 30-60s for faster recovery + self.heartbeat_interval = max(30, min(60, base // 2)) + elif health < 0.7: + # Moderate: slightly more frequent than baseline + self.heartbeat_interval = max(60, int(base * 0.75)) + else: + # Good health: use baseline interval + self.heartbeat_interval = base + def _check_spam_protection(self, peer_id: ID, msg: rpc_pb2.Message) -> bool: """ Check if message should be rejected due to spam protection. 
@@ -1952,7 +2426,7 @@ def _check_equivocation(self, msg: rpc_pb2.Message) -> bool: :param msg: The message to check :return: True if message is valid, False if equivocation detected """ - msg_key = (msg.seqno, msg.from_id) + msg_key = msg.from_id + msg.seqno if msg_key in self.equivocation_detection: existing_msg = self.equivocation_detection[msg_key] @@ -1961,7 +2435,7 @@ def _check_equivocation(self, msg: rpc_pb2.Message) -> bool: logger.warning("Equivocation detected from peer %s", ID(msg.from_id)) # Severely penalize equivocating peer if self.scorer is not None: - self.scorer.penalize_behavior(ID(msg.from_id), 100.0) + self.scorer.penalize_equivocation(ID(msg.from_id), 100.0) return False else: # Store first occurrence @@ -2076,6 +2550,14 @@ def _cleanup_security_state(self, peer_id: ID) -> None: if peer_id in self.message_rate_limits: del self.message_rate_limits[peer_id] + # Clean up v1.4 rate limiting data + if peer_id in self.iwant_request_limits: + del self.iwant_request_limits[peer_id] + if peer_id in self.ihave_message_limits: + del self.ihave_message_limits[peer_id] + if peer_id in self.graft_flood_tracking: + del self.graft_flood_tracking[peer_id] + def _perform_opportunistic_grafting( self, topic: str, peers_to_graft: DefaultDict[ID, list[str]] ) -> int: @@ -2158,23 +2640,20 @@ def _calculate_grafting_threshold( """ Calculate the score threshold for opportunistic grafting candidates. + Uses opportunistic_graft_threshold (adapted by + _adapt_opportunistic_grafting_parameters based on network health) to control + aggressiveness: lower = more aggressive, higher = more selective. 
+ :param median_score: Median score of current mesh peers :param avg_score: Average score of current mesh peers :param min_score: Minimum score of current mesh peers :param topic: The topic being considered :return: Score threshold for candidates """ - # Base threshold is median score - threshold = median_score - - # Adjust based on network health - if hasattr(self, "network_health_score"): - if self.network_health_score < 0.5: - # Poor network health: be more aggressive, lower threshold - threshold = min(median_score, avg_score * 0.8) - elif self.network_health_score > 0.8: - # Good network health: be more selective, higher threshold - threshold = max(median_score, avg_score * 1.2) + # Base threshold from median, scaled by opportunistic_graft_threshold + # Lower threshold (aggressive) = more peers qualify; higher (selective) = fewer + graft_threshold = getattr(self, "opportunistic_graft_threshold", 0.5) + threshold = median_score * graft_threshold # Ensure threshold is reasonable threshold = max( @@ -2424,7 +2903,7 @@ def _prune_for_ip_diversity( return selected - def _maintain_mesh_quality(self, topic: str) -> None: + async def _maintain_mesh_quality(self, topic: str) -> None: """ Perform ongoing mesh quality maintenance beyond basic degree bounds. @@ -2438,14 +2917,17 @@ def _maintain_mesh_quality(self, topic: str) -> None: return # Check if we should replace low-scoring peers with better alternatives - self._consider_peer_replacement(topic) + await self._consider_peer_replacement(topic) # Ensure we maintain good connectivity patterns self._optimize_mesh_connectivity(topic) - def _consider_peer_replacement(self, topic: str) -> None: + async def _consider_peer_replacement(self, topic: str) -> None: """ - Consider replacing the worst mesh peer with a better alternative. + Replace the worst mesh peer with a better alternative when beneficial. + + Performs mesh mutation, sends PRUNE to the removed peer and GRAFT to the + new peer. 
:param topic: The topic to consider """ @@ -2486,36 +2968,76 @@ def _consider_peer_replacement(self, topic: str) -> None: best_replacement = peer best_score = peer_score - # Perform replacement if beneficial - if best_replacement is not None: - logger.debug( - "Replacing mesh peer %s (score: %.2f) with %s (score: %.2f) " - "in topic %s", - worst_peer, - worst_score, - best_replacement, - best_score, - topic, - ) + if best_replacement is None: + return - # Note: In a full implementation, we'd send PRUNE to worst_peer - # and GRAFT to best_replacement. For now, we just log the decision + # Perform replacement: mutate mesh and emit PRUNE/GRAFT + self.mesh[topic].discard(worst_peer) + self.mesh[topic].add(best_replacement) + + if self.scorer is not None: + self.scorer.on_leave_mesh(worst_peer, topic) + self.scorer.on_join_mesh(best_replacement, topic) + + # Add back_off so we don't immediately re-graft the pruned peer + self._add_back_off(worst_peer, topic, False) + + # Track PRUNE time for GRAFT flood protection + self.graft_flood_tracking[worst_peer][topic] = time.time() + + logger.debug( + "Replacing mesh peer %s (score: %.2f) with %s (score: %.2f) in topic %s", + worst_peer, + worst_score, + best_replacement, + best_score, + topic, + ) + + try: + await self.emit_prune(topic, worst_peer, self.do_px, False) + await self.emit_graft(topic, best_replacement) + except Exception as e: + logger.warning("Failed to emit PRUNE/GRAFT during peer replacement: %s", e) + # Revert mesh and scorer on failure + self.mesh[topic].add(worst_peer) + self.mesh[topic].discard(best_replacement) + if self.scorer is not None: + self.scorer.on_join_mesh(worst_peer, topic) + self.scorer.on_leave_mesh(best_replacement, topic) + # Clear back_off we added (peer stays in back_off until expiry) + if topic in self.back_off and worst_peer in self.back_off[topic]: + del self.back_off[topic][worst_peer] def _optimize_mesh_connectivity(self, topic: str) -> None: """ Optimize mesh connectivity patterns 
for better resilience. + Validates mesh invariants and applies lightweight optimizations. + IP diversity is handled by _ensure_mesh_diversity and _prune_for_ip_diversity. + Geographic/latency optimization would require additional metrics. + :param topic: The topic to optimize """ - # This could include: - # - Ensuring geographic diversity - # - Balancing inbound/outbound connections - # - Optimizing for network latency - # For now, we focus on IP diversity which is handled elsewhere - pass + if topic not in self.mesh or self.pubsub is None: + return - # Clean up equivocation detection (keep recent entries for a while) - # This is done periodically in heartbeat to avoid memory leaks + mesh_peers = self.mesh[topic] + effective_high = ( + self.adaptive_degree_high + if self.adaptive_gossip_enabled + else self.degree_high + ) + + # Sanity check: mesh should not exceed degree_high (handled in mesh_heartbeat, + # but we verify here as a safeguard). No action needed if within bounds. + if len(mesh_peers) > effective_high + 2: + logger.debug( + "Mesh for topic %s exceeds expected bounds (%d > %d)", + topic, + len(mesh_peers), + effective_high, + ) def _periodic_security_cleanup(self) -> None: """ @@ -2547,3 +3069,38 @@ def _periodic_security_cleanup(self) -> None: # Remove empty peer entries if not self.message_rate_limits[peer_id]: del self.message_rate_limits[peer_id] + + # Clean up v1.4 rate limiting data + for peer_id in list(self.iwant_request_limits.keys()): + for request_type in list(self.iwant_request_limits[peer_id].keys()): + timestamps = self.iwant_request_limits[peer_id][request_type] + cutoff = current_time - 2.0 + timestamps[:] = [t for t in timestamps if t > cutoff] + + if not timestamps: + del self.iwant_request_limits[peer_id][request_type] + + if not self.iwant_request_limits[peer_id]: + del self.iwant_request_limits[peer_id] + + for peer_id in list(self.ihave_message_limits.keys()): + for topic in list(self.ihave_message_limits[peer_id].keys()): + 
timestamps = self.ihave_message_limits[peer_id][topic] + cutoff = current_time - 2.0 + timestamps[:] = [t for t in timestamps if t > cutoff] + + if not timestamps: + del self.ihave_message_limits[peer_id][topic] + + if not self.ihave_message_limits[peer_id]: + del self.ihave_message_limits[peer_id] + + # Clean up old GRAFT flood tracking (keep for 30 seconds) + graft_cutoff = current_time - 30.0 + for peer_id in list(self.graft_flood_tracking.keys()): + for topic in list(self.graft_flood_tracking[peer_id].keys()): + if self.graft_flood_tracking[peer_id][topic] <= graft_cutoff: + del self.graft_flood_tracking[peer_id][topic] + + if not self.graft_flood_tracking[peer_id]: + del self.graft_flood_tracking[peer_id] diff --git a/libp2p/pubsub/utils.py b/libp2p/pubsub/utils.py index 76e7ed519..a777b85e0 100644 --- a/libp2p/pubsub/utils.py +++ b/libp2p/pubsub/utils.py @@ -54,6 +54,19 @@ def maybe_consume_signed_record(msg: RPC, host: IHost, peer_id: ID) -> bool: return True +def safe_bytes_from_hex(s: str | bytes) -> bytes | None: + """ + Safely decode a hex string to bytes. Returns None if invalid. + If input is already bytes, returns it as-is. 
+ """ + if isinstance(s, bytes): + return s + try: + return bytes.fromhex(s) + except (ValueError, TypeError): + return None + + def parse_message_id_safe(msg_id_str: str) -> MessageID: """Safely handle message ID as string.""" return MessageID(msg_id_str) diff --git a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py index 0483d7926..d6b212e25 100644 --- a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py +++ b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py @@ -1,7 +1,6 @@ -import pytest from unittest.mock import AsyncMock -import trio +import pytest from libp2p.peer.id import ID from libp2p.pubsub.extensions import ( @@ -20,7 +19,6 @@ IDFactory, ) - # --------------------------------------------------------------------------- # PeerExtensions tests # --------------------------------------------------------------------------- @@ -165,18 +163,19 @@ def test_extensions_state_remove_peer_clears_state() -> None: def test_both_support_topic_observation_query() -> None: """both_support_topic_observation returns True only when both sides advertise it.""" - peer_id = IDFactory() state = ExtensionsState(my_extensions=PeerExtensions(topic_observation=True)) - # Peer without topicObservation support. + # Peer that did not advertise topicObservation (we only accept first Extensions per peer). + peer_no = IDFactory() rpc_no = _make_rpc_with_extensions(topic_observation=False) - state.handle_rpc(rpc_no, peer_id) - assert not state.both_support_topic_observation(peer_id) + state.handle_rpc(rpc_no, peer_no) + assert not state.both_support_topic_observation(peer_no) - # Overwrite with a peer that does support topicObservation. + # Different peer that does advertise topicObservation. 
+ peer_yes = IDFactory() rpc_yes = _make_rpc_with_extensions(topic_observation=True) - state.handle_rpc(rpc_yes, peer_id) - assert state.both_support_topic_observation(peer_id) + state.handle_rpc(rpc_yes, peer_yes) + assert state.both_support_topic_observation(peer_yes) def test_gossipsub_report_extensions_misbehaviour_penalizes_behavior() -> None: @@ -426,4 +425,3 @@ def __init__(self) -> None: await router.stop_observing_topic(topic) assert not router.topic_observation.is_observing(topic) assert router.topic_observation.get_subscriber_peers_for_topic(topic) == set() - From bf13bc66fdbf863c40e6a643d701f3a298266c22 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 7 Mar 2026 22:13:44 +0530 Subject: [PATCH 07/15] resolved pre commit hook issues --- libp2p/pubsub/gossipsub.py | 3 +- libp2p/pubsub/utils.py | 16 ++------- .../pubsub/test_gossipsub_v14_extensions.py | 36 +++++++------------ .../pubsub/test_gossipsub_v1_3_extensions.py | 27 +++++++------- 4 files changed, 31 insertions(+), 51 deletions(-) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index d06050f29..20db0f85a 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -630,7 +630,8 @@ async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None: if control_message.idontwant: for idontwant in control_message.idontwant: await self.handle_idontwant(idontwant, sender_peer_id) - # v1.3 Extensions control message is processed above via extensions_state.handle_rpc() + # v1.3 Extensions control message is processed above via + # extensions_state.handle_rpc() # GossipSub v1.3 – Topic Observation extension if self.supports_v13_features(sender_peer_id): diff --git a/libp2p/pubsub/utils.py b/libp2p/pubsub/utils.py index 3352d16a4..78e5f2d1f 100644 --- a/libp2p/pubsub/utils.py +++ b/libp2p/pubsub/utils.py @@ -56,7 +56,8 @@ def maybe_consume_signed_record(msg: RPC, host: IHost, peer_id: ID) -> bool: def safe_bytes_from_hex(s: str | bytes) -> bytes | None: """ 
Safely decode a hex string to bytes. Returns None if invalid. - If input is already bytes, returns it as-is. + If input is already bytes, returns it as-is. Used for parsing wire + message IDs in IHAVE/IWANT so malformed hex from peers does not crash. """ if isinstance(s, bytes): return s @@ -69,16 +70,3 @@ def safe_bytes_from_hex(s: str | bytes) -> bytes | None: def parse_message_id_safe(msg_id_str: str) -> MessageID: """Safely handle message ID as string.""" return MessageID(msg_id_str) - - -def safe_bytes_from_hex(hex_str: str) -> bytes | None: - """ - Decode a hex-encoded string to bytes, returning None on failure. - - Used for defensively parsing wire message IDs in IHAVE/IWANT handlers - so that malformed hex from peers does not crash the gossip handler task. - """ - try: - return bytes.fromhex(hex_str) - except ValueError: - return None diff --git a/tests/core/pubsub/test_gossipsub_v14_extensions.py b/tests/core/pubsub/test_gossipsub_v14_extensions.py index 4c6360f6b..782a44039 100644 --- a/tests/core/pubsub/test_gossipsub_v14_extensions.py +++ b/tests/core/pubsub/test_gossipsub_v14_extensions.py @@ -168,14 +168,9 @@ async def test_unregistered_extension_handling(): @pytest.mark.trio async def test_extension_message_from_unsupported_peer(): - """Test receiving extension message from peer that doesn't support extensions.""" + """Extension data from a v1.1 peer is not processed; only v1.3+ are handled.""" from libp2p.pubsub.gossipsub import PROTOCOL_ID_V11 - received_count = [0] # Use list to make it mutable - - async def handler(data: bytes, sender_peer_id: ID): - received_count[0] += 1 - # Create one v1.4 peer and one v1.1 peer async with PubsubFactory.create_batch_with_gossipsub( 1, protocols=[PROTOCOL_ID_V14] @@ -184,35 +179,30 @@ async def handler(data: bytes, sender_peer_id: ID): 1, protocols=[PROTOCOL_ID_V11] ) as v11_pubsubs: v14_router = v14_pubsubs[0].router - v11_router = v11_pubsubs[0].router assert isinstance(v14_router, GossipSub) - assert 
isinstance(v11_router, GossipSub) - # Register handler on v1.4 peer - v14_router.register_extension_handler("test-ext", handler) - - # Connect peers await connect(v14_pubsubs[0].host, v11_pubsubs[0].host) await trio.sleep(0.5) - # Manually set the peer protocol mapping to simulate v1.1 peer + # Simulate v1.1 peer: router thinks this peer speaks v1.1 only v11_peer_id = v11_pubsubs[0].host.get_id() v14_router.peer_protocol[v11_peer_id] = PROTOCOL_ID_V11 - # Verify the peer is recognized as not supporting extensions assert not v14_router.supports_protocol_feature(v11_peer_id, "extensions") - # Create extension message from v1.1 peer - extension_msg = rpc_pb2.ControlExtension() - extension_msg.name = "test-ext" - extension_msg.data = b"test data" + # Build an RPC that would carry control.extensions (as a v1.3 peer would) + rpc = rpc_pb2.RPC() + rpc.control.CopyFrom(rpc_pb2.ControlMessage()) + rpc.control.extensions.CopyFrom( + rpc_pb2.ControlExtensions(topicObservation=True) + ) - # This should be ignored due to protocol version check - await v14_router.handle_extension(extension_msg, v11_peer_id) - await trio.sleep(0.5) + # handle_rpc must not process extensions for v1.1 peers + await v14_router.handle_rpc(rpc, v11_peer_id) - # Handler should not have been called - assert received_count[0] == 0 + # v1.1 peer's extensions must not be recorded (we skip extensions_state + # when sender does not support v1.3+) + assert v14_router.extensions_state.get_peer_extensions(v11_peer_id) is None @pytest.mark.trio diff --git a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py index d6b212e25..a3a1d2f47 100644 --- a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py +++ b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py @@ -14,6 +14,7 @@ ) from libp2p.pubsub.pb import rpc_pb2 from libp2p.pubsub.score import ScoreParams +from libp2p.tools.constants import GOSSIPSUB_PARAMS from tests.utils.factories import ( GossipsubFactory, 
IDFactory, @@ -165,7 +166,8 @@ def test_both_support_topic_observation_query() -> None: """both_support_topic_observation returns True only when both sides advertise it.""" state = ExtensionsState(my_extensions=PeerExtensions(topic_observation=True)) - # Peer that did not advertise topicObservation (we only accept first Extensions per peer). + # Peer that did not advertise topicObservation (we only accept first + # Extensions per peer). peer_no = IDFactory() rpc_no = _make_rpc_with_extensions(topic_observation=False) state.handle_rpc(rpc_no, peer_no) @@ -185,24 +187,23 @@ def test_gossipsub_report_extensions_misbehaviour_penalizes_behavior() -> None: p5_behavior_penalty_threshold=0.0, p5_behavior_penalty_decay=1.0, ) - router = GossipsubFactory(score_params=score_params) + router = GossipSub( + protocols=[PROTOCOL_ID_V13], + degree=GOSSIPSUB_PARAMS.degree, + degree_low=GOSSIPSUB_PARAMS.degree_low, + degree_high=GOSSIPSUB_PARAMS.degree_high, + score_params=score_params, + ) assert isinstance(router, GossipSub) assert router.scorer is not None peer_id = IDFactory() - - penalize_calls: list[tuple[ID, float]] = [] - original_penalize = router.scorer.penalize_behavior - - def wrapped_penalize(p: ID, amount: float = 1.0) -> None: - penalize_calls.append((p, amount)) - original_penalize(p, amount) - - router.scorer.penalize_behavior = wrapped_penalize # type: ignore[assignment] + assert peer_id not in router.scorer.behavior_penalty router._report_extensions_misbehaviour(peer_id) - assert penalize_calls == [(peer_id, 1.0)] + # _report_extensions_misbehaviour must call scorer.penalize_behavior(peer_id, 1.0) + assert router.scorer.behavior_penalty[peer_id] == 1.0 # --------------------------------------------------------------------------- @@ -389,7 +390,7 @@ def __init__(self) -> None: @pytest.mark.trio async def test_start_and_stop_observing_topic_high_level_api() -> None: - """start_observing_topic / stop_observing_topic should delegate to OBSERVE/UNOBSERVE.""" + 
"""start_observing_topic / stop_observing_topic delegate to OBSERVE/UNOBSERVE.""" router = GossipsubFactory() assert isinstance(router, GossipSub) From 92d973bb45b8547f357635bf843deb1725e8394a Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Sat, 7 Mar 2026 23:29:04 +0530 Subject: [PATCH 08/15] tests: align v14 extension tests with no-op emit_extension (v1.3 wire format) --- .../pubsub/test_gossipsub_v14_extensions.py | 65 +++++++++++++------ 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/tests/core/pubsub/test_gossipsub_v14_extensions.py b/tests/core/pubsub/test_gossipsub_v14_extensions.py index 782a44039..86bcc13b6 100644 --- a/tests/core/pubsub/test_gossipsub_v14_extensions.py +++ b/tests/core/pubsub/test_gossipsub_v14_extensions.py @@ -49,7 +49,15 @@ async def test_handler(data: bytes, sender_peer_id: ID): @pytest.mark.trio async def test_extension_message_handling(): - """Test extension message handling between peers.""" + """ + Test extension handler registration and that emit_extension completes. + + In GossipSub v1.3 wire format, extension data is only sent in the first + hello (control.extensions); there is no wire format for arbitrary + extension name/data. So emit_extension is a no-op and handlers are not + invoked for custom data. This test verifies registration and that + emit_extension completes without raising. + """ received_extensions = [] async def extension_handler(data: bytes, sender_peer_id: ID): @@ -73,17 +81,17 @@ async def extension_handler(data: bytes, sender_peer_id: ID): # Get peer IDs peer1_id = pubsubs[1].host.get_id() - # Send extension message from router0 to router1 + # emit_extension is a no-op (v1.3 does not send arbitrary extension + # name/data); it should complete without raising. 
test_data = b"test extension data" await router0.emit_extension("test-ext", test_data, peer1_id) # Wait for message processing await trio.sleep(0.5) - # Verify extension was received and handled - assert len(received_extensions) == 1 - assert received_extensions[0][0] == test_data - assert received_extensions[0][1] == pubsubs[0].host.get_id() + # No extension data is delivered over the wire (only hello carries + # control.extensions), so the handler is never called. + assert len(received_extensions) == 0 @pytest.mark.trio @@ -110,7 +118,13 @@ async def test_extension_message_to_unsupported_peer(): @pytest.mark.trio async def test_extension_handler_error_handling(): - """Test error handling in extension handlers.""" + """ + Test that emit_extension completes when a failing handler is registered. + + emit_extension is a no-op (v1.3 does not send arbitrary extension data), + so the handler is never invoked. We verify emit_extension does not raise + and the handler is not called. + """ error_count = [0] # Use list to make it mutable async def failing_handler(data: bytes, sender_peer_id: ID): @@ -132,15 +146,15 @@ async def failing_handler(data: bytes, sender_peer_id: ID): await connect(pubsubs[0].host, pubsubs[1].host) await trio.sleep(0.5) - # Send extension message + # emit_extension is a no-op; should complete without raising peer1_id = pubsubs[1].host.get_id() await router0.emit_extension("failing-ext", b"data", peer1_id) # Wait for processing await trio.sleep(0.5) - # Verify handler was called but error was caught - assert error_count[0] == 1 + # Handler is never called (no extension data is sent over the wire) + assert error_count[0] == 0 @pytest.mark.trio @@ -207,7 +221,13 @@ async def test_extension_message_from_unsupported_peer(): @pytest.mark.trio async def test_multiple_extension_handlers(): - """Test multiple extension handlers on the same router.""" + """ + Test multiple extension handlers registered and emit_extension completes. 
+ + emit_extension is a no-op (v1.3 does not send arbitrary extension data), + so handlers are never invoked. We verify both handlers can be registered + and emit_extension for each completes without raising. + """ received_messages = [] async def handler1(data: bytes, sender_peer_id: ID): @@ -232,7 +252,7 @@ async def handler2(data: bytes, sender_peer_id: ID): await connect(pubsubs[0].host, pubsubs[1].host) await trio.sleep(0.5) - # Send messages to different extensions + # emit_extension is a no-op for both; should complete without raising peer1_id = pubsubs[1].host.get_id() await router0.emit_extension("ext1", b"data1", peer1_id) await router0.emit_extension("ext2", b"data2", peer1_id) @@ -240,15 +260,19 @@ async def handler2(data: bytes, sender_peer_id: ID): # Wait for processing await trio.sleep(0.5) - # Verify both handlers were called - assert len(received_messages) == 2 - assert ("handler1", b"data1") in received_messages - assert ("handler2", b"data2") in received_messages + # No extension data is delivered, so neither handler is called + assert len(received_messages) == 0 @pytest.mark.trio async def test_extension_v13_compatibility(): - """Test extensions work with v1.3 protocol.""" + """ + Test extension registration and emit_extension with v1.3 protocol. + + Same as v1.4: emit_extension is a no-op (v1.3 wire format only sends + extensions in the first hello). We verify registration and that + emit_extension completes without raising. 
+ """ received_extensions = [] async def extension_handler(data: bytes, sender_peer_id: ID): @@ -269,7 +293,7 @@ async def extension_handler(data: bytes, sender_peer_id: ID): await connect(pubsubs[0].host, pubsubs[1].host) await trio.sleep(0.5) - # Send extension message + # emit_extension is a no-op; should complete without raising peer1_id = pubsubs[1].host.get_id() test_data = b"v1.3 extension data" await router0.emit_extension("v13-ext", test_data, peer1_id) @@ -277,6 +301,5 @@ async def extension_handler(data: bytes, sender_peer_id: ID): # Wait for processing await trio.sleep(0.5) - # Verify extension was handled - assert len(received_extensions) == 1 - assert received_extensions[0][0] == test_data + # No extension data is delivered over the wire + assert len(received_extensions) == 0 From dc3ba5d32bcc72018413d40d3346d8186d0f1536 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Tue, 10 Mar 2026 00:03:39 +0530 Subject: [PATCH 09/15] docs: fix GossipSub.handle_observe docstring list formatting to satisfy Sphinx --- libp2p/pubsub/gossipsub.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 20db0f85a..f3f8c7224 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -1781,10 +1781,11 @@ async def handle_observe( will send an IHAVE to *sender_peer_id* immediately (not at the next heartbeat). - Per the Topic Observation spec, only peers that: - 1. Negotiated ``/meshsub/1.3.0`` (checked by the caller), AND - 2. Advertised the ``topicObservation`` extension in their first message - should be permitted to send OBSERVE. + Per the Topic Observation spec, only peers that meet both of the + following conditions should be permitted to send OBSERVE: + + - Negotiated ``/meshsub/1.3.0`` (checked by the caller), and + - Advertised the ``topicObservation`` extension in their first message. :param observe_msg: The OBSERVE control message. 
 :param sender_peer_id: ID of the peer that sent the OBSERVE.

From d4cc6e74527c1e5a8d8d0232fa2f8e5a95aba88c Mon Sep 17 00:00:00 2001
From: Winter-Soren
Date: Tue, 17 Mar 2026 01:26:51 +0530
Subject: [PATCH 10/15] added newsfragment, replaced string with hex method and protocol version check

---
 docs/examples.rst              |  1 +
 docs/gossipsub-1.3.rst         | 75 ++++++++++++++++++++++++++++++++++
 docs/libp2p.pubsub.rst         |  4 +-
 libp2p/pubsub/gossipsub.py     |  4 +-
 libp2p/pubsub/pb/rpc.proto     |  5 +++
 libp2p/pubsub/pubsub.py        | 22 +++++++---
 newsfragments/1231.feature.rst |  5 +++
 7 files changed, 108 insertions(+), 8 deletions(-)
 create mode 100644 docs/gossipsub-1.3.rst
 create mode 100644 newsfragments/1231.feature.rst

diff --git a/docs/examples.rst b/docs/examples.rst
index 09f0edc59..a29acf2f0 100644
--- a/docs/examples.rst
+++ b/docs/examples.rst
@@ -24,6 +24,7 @@ Examples
    examples.multiple_connections
    tls-support
    gossipsub-1.2
+   gossipsub-1.3
    examples.websocket
    examples.tls
    examples.autotls
diff --git a/docs/gossipsub-1.3.rst b/docs/gossipsub-1.3.rst
new file mode 100644
index 000000000..134a11f19
--- /dev/null
+++ b/docs/gossipsub-1.3.rst
@@ -0,0 +1,75 @@
+GossipSub 1.3 Extensions and Topic Observation
+=============================================
+
+Overview
+--------
+
+Py-libp2p supports the GossipSub v1.3 Extensions Control Message mechanism and the
+Topic Observation extension. These features require negotiating the
+``/meshsub/1.3.0`` protocol (or later) with peers.
+
+Topic Observation
+-----------------
+
+The Topic Observation extension allows a peer to receive IHAVE notifications for
+a topic without being a full subscriber. This is useful for presence awareness:
+knowing when messages are published on a topic without actually receiving the
+message payloads.
+
+Lifecycle
+~~~~~~~~~
+
+1. **Start observing**: Call ``start_observing_topic(topic)`` to send OBSERVE
+   control messages to in-topic peers that support the extension. The router
+   will then send IHAVE notifications to you when new messages arrive on that
+   topic.
+
+2. **Receive IHAVE**: As an observer, you receive IHAVE control messages
+   containing message IDs. These are presence notifications only; observers do
+   not typically reply with IWANT to fetch the actual messages.
+
+3. **Stop observing**: Call ``stop_observing_topic(topic)`` to send UNOBSERVE
+   control messages and stop receiving IHAVE notifications for that topic.
+
+Usage Example
+~~~~~~~~~~~~~
+
+.. code-block:: python
+
+    from libp2p import new_node
+    from libp2p.pubsub.gossipsub import GossipSub
+    from libp2p.pubsub.pubsub import PubSub
+
+    # Create node with GossipSub v1.3
+    node = await new_node()
+    gossipsub = GossipSub() # Default config includes v1.3 protocols
+    pubsub = PubSub(gossipsub)
+    await node.start()
+    pubsub.set_pubsub(node)
+
+    # Start observing a topic (no subscription; IHAVE-only notifications)
+    await gossipsub.start_observing_topic("my-topic")
+
+    # ... later, when done ...
+    await gossipsub.stop_observing_topic("my-topic")
+
+Protocol Requirements
+~~~~~~~~~~~~~~~~~~~~
+
+* Topic Observation requires both peers to negotiate ``/meshsub/1.3.0`` (or
+  later) and to advertise support via the Extensions Control Message.
+* Extensions are only sent when the negotiated protocol is v1.3+; peers on
+  v1.1/v1.2 do not receive extension fields.
+
+Specification References
+------------------------
+
+* `GossipSub v1.3 Extensions <https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md>`_
+* `Topic Observation proposal <https://ethresear.ch/t/gossipsub-topic-observation-proposed-gossipsub-1-3/20907>`_
+
+Related Documentation
+---------------------
+
+* :doc:`gossipsub-1.2` - GossipSub 1.2 features (IDONTWANT, etc.)
+* :doc:`examples.pubsub` - PubSub chat example +* :doc:`libp2p.pubsub` - Complete PubSub API documentation diff --git a/docs/libp2p.pubsub.rst b/docs/libp2p.pubsub.rst index 62eda2fca..a6f53b992 100644 --- a/docs/libp2p.pubsub.rst +++ b/docs/libp2p.pubsub.rst @@ -3,7 +3,9 @@ libp2p.pubsub package Py-libp2p provides a comprehensive PubSub implementation with support for both FloodSub and GossipSub protocols, including the latest GossipSub 1.2 specification with IDONTWANT control messages for improved bandwidth efficiency. -For detailed information about GossipSub 1.2 features and configuration, see :doc:`gossipsub-1.2`. +For detailed information about GossipSub features and configuration, see +:doc:`gossipsub-1.2` (IDONTWANT, etc.) and :doc:`gossipsub-1.3` (v1.3 extensions, +Topic Observation). Subpackages ----------- diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 3305c31ec..5cced211e 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -2171,7 +2171,9 @@ async def _notify_observers(self, topic_ids: Iterable[str], msg_id: bytes) -> No return pubsub = self.pubsub # narrow type for pyrefly / mypy - msg_id_str = str(msg_id) + # Use hex() to match heartbeat path; str(bytes) produces "b'...'" which + # fails safe_bytes_from_hex() in handle_ihave(). + msg_id_str = msg_id.hex() for topic in topic_ids: observers = self.topic_observation.get_observers(topic) diff --git a/libp2p/pubsub/pb/rpc.proto b/libp2p/pubsub/pb/rpc.proto index 45008a232..b0691e93f 100644 --- a/libp2p/pubsub/pb/rpc.proto +++ b/libp2p/pubsub/pb/rpc.proto @@ -2,6 +2,11 @@ // Updated with GossipSub v1.3 Extensions Control Message support. 
// Spec: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.3.md // extensions.proto: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/extensions/extensions.proto +// +// Interop note: The Topic Observation extension (observe/unobserve, topicObservation) +// is not yet in the upstream libp2p/specs extensions.proto. Field numbers +// follow the go-libp2p reference implementation for cross-client interop. +// See: https://ethresear.ch/t/gossipsub-topic-observation-proposed-gossipsub-1-3/20907 syntax = "proto2"; diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 0cdc62f6c..f7731045e 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -90,6 +90,16 @@ signature_validator, ) +# GossipSub v1.3+ protocol IDs. Extensions Control Message is only sent when +# negotiating one of these protocols (per spec: extensions in first message). +_MESHSUB_V13_PLUS = frozenset( + ( + TProtocol("/meshsub/1.3.0"), + TProtocol("/meshsub/1.4.0"), + TProtocol("/meshsub/2.0.0"), + ) +) + class _RouterWithExtensions(Protocol): """Protocol for a router that supports GossipSub v1.3 extensions.""" @@ -735,14 +745,14 @@ async def _handle_new_peer(self, peer_id: ID) -> None: # GossipSub v1.3 – Extensions Control Message injection. # Per spec: "If a peer supports any extension, the Extensions control # message MUST be included in the first message on the stream." - # We ask the router (if it is a v1.3-capable GossipSub router) to - # attach ControlExtensions to the hello packet before we serialise it. - # This is done via duck-typing so pubsub.py stays decoupled from - # gossipsub.py (matching the existing architecture). + # Only inject when we negotiated v1.3+; peers on v1.1/v1.2 must not + # receive extension fields. 
negotiated_protocol = stream.get_protocol() router = self.router - if hasattr(router, "extensions_state") and hasattr( - router, "supports_v13_features" + if ( + negotiated_protocol in _MESHSUB_V13_PLUS + and hasattr(router, "extensions_state") + and hasattr(router, "supports_v13_features") ): # We pass the peer_id because extensions_state needs to track # "sent_extensions" per peer for the at-most-once rule. diff --git a/newsfragments/1231.feature.rst b/newsfragments/1231.feature.rst new file mode 100644 index 000000000..14eea29ac --- /dev/null +++ b/newsfragments/1231.feature.rst @@ -0,0 +1,5 @@ +GossipSub v1.3 Extensions and Topic Observation support. + +- Added Extensions Control Message mechanism per GossipSub v1.3 spec; extensions are advertised in the first message on the stream and gated to v1.3+ protocol negotiation. +- Added Topic Observation extension: peers can observe topics without full subscription via ``start_observing_topic()`` and ``stop_observing_topic()``, receiving IHAVE notifications for presence awareness. +- Added test extension for cross-implementation interop (go-libp2p, rust-libp2p, py-libp2p). 
From ba224a8a4b3c81bf260961a3902d79fddb49faa3 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Thu, 26 Mar 2026 20:54:25 +0530 Subject: [PATCH 11/15] Fix rpc_queue extensions splitting for optional ControlExtensions and align pubsub IHAVE tests with hex message IDs --- libp2p/pubsub/rpc_queue.py | 10 +++---- .../pubsub/test_gossipsub_v1_3_extensions.py | 4 +-- tests/core/pubsub/test_rpc_queue.py | 30 +++++++++++-------- 3 files changed, 24 insertions(+), 20 deletions(-) diff --git a/libp2p/pubsub/rpc_queue.py b/libp2p/pubsub/rpc_queue.py index 32b5ef092..29a386573 100644 --- a/libp2p/pubsub/rpc_queue.py +++ b/libp2p/pubsub/rpc_queue.py @@ -367,15 +367,15 @@ def split_rpc(self, rpc: rpc_pb2.RPC) -> list[rpc_pb2.RPC]: ) ) - # EXTENSIONS - for ext in ctrl.extensions: - current.control.extensions.append(ext) + # EXTENSIONS (optional singular message) + if ctrl.HasField("extensions"): + current.control.extensions.CopyFrom(ctrl.extensions) if current.ByteSize() > limit: - del current.control.extensions[-1] + current.control.ClearField("extensions") out.append(current) current = rpc_pb2.RPC() current.control.SetInParent() - current.control.extensions.append(ext) + current.control.extensions.CopyFrom(ctrl.extensions) # ── Flush remaining ── if current.ByteSize() > 0: diff --git a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py index a3a1d2f47..79ff2ad2b 100644 --- a/tests/core/pubsub/test_gossipsub_v1_3_extensions.py +++ b/tests/core/pubsub/test_gossipsub_v1_3_extensions.py @@ -380,12 +380,12 @@ def __init__(self) -> None: await router._notify_observers([topic], msg_id) - # emit_ihave(topic, [str(msg_id)], observer_peer) is expected. + # emit_ihave(topic, [msg_id.hex()], observer_peer) is expected. 
router.emit_ihave.assert_awaited_once() called_topic, called_msg_ids, called_peer = router.emit_ihave.call_args.args assert called_topic == topic assert called_peer == observer_peer - assert called_msg_ids == [str(msg_id)] + assert called_msg_ids == [msg_id.hex()] @pytest.mark.trio diff --git a/tests/core/pubsub/test_rpc_queue.py b/tests/core/pubsub/test_rpc_queue.py index 45123eb84..3e502b44e 100644 --- a/tests/core/pubsub/test_rpc_queue.py +++ b/tests/core/pubsub/test_rpc_queue.py @@ -510,32 +510,36 @@ def test_propagate_on_empty_out(self) -> None: class TestSplitRpcExtensions: def test_fast_path(self) -> None: rpc = rpc_pb2.RPC() - ext = rpc.control.extensions.add() - ext.name = "test-ext" - ext.data = b"ext-data" + rpc.control.extensions.topicObservation = True parts = RpcQueue(max_message_size=10000).split_rpc(rpc) assert len(parts) == 1 - assert parts[0].control.extensions[0].name == "test-ext" + assert parts[0].control.HasField("extensions") + assert parts[0].control.extensions.topicObservation is True def test_slow_path_split(self) -> None: rpc = rpc_pb2.RPC() for i in range(20): rpc.control.graft.add().topicID = f"topic-{i}" * 10 - for i in range(10): - ext = rpc.control.extensions.add() - ext.name = f"ext-{i}" - ext.data = b"x" * 50 + rpc.control.extensions.topicObservation = True parts = RpcQueue(max_message_size=100).split_rpc(rpc) - all_exts = [ - e for p in parts if p.HasField("control") for e in p.control.extensions - ] - assert len(all_exts) == 10 + assert len(parts) > 1 + ext_count = sum( + 1 + for p in parts + if p.HasField("control") and p.control.HasField("extensions") + ) + assert ext_count == 1 + assert any( + p.control.extensions.topicObservation + for p in parts + if p.HasField("control") and p.control.HasField("extensions") + ) def test_extension_only_not_filtered(self) -> None: rpc = rpc_pb2.RPC() - rpc.control.extensions.add().name = "only" + rpc.control.extensions.topicObservation = True assert _rpc_has_data(rpc) is True parts = 
RpcQueue(max_message_size=10000).split_rpc(rpc) From d2be9ee39169048e7dac84bbfc1d31d94d4af36f Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Thu, 26 Mar 2026 21:07:14 +0530 Subject: [PATCH 12/15] formatted gossipsub 1.3 docs --- docs/gossipsub-1.3.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/gossipsub-1.3.rst b/docs/gossipsub-1.3.rst index 134a11f19..ff94b6234 100644 --- a/docs/gossipsub-1.3.rst +++ b/docs/gossipsub-1.3.rst @@ -1,5 +1,5 @@ GossipSub 1.3 Extensions and Topic Observation -============================================= +============================================== Overview -------- @@ -54,7 +54,7 @@ Usage Example await gossipsub.stop_observing_topic("my-topic") Protocol Requirements -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~ * Topic Observation requires both peers to negotiate ``/meshsub/1.3.0`` (or later) and to advertise support via the Extensions Control Message. From cdebdc906388fe15506f8ddaf1d1118a9620f52e Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Mon, 30 Mar 2026 02:42:09 +0530 Subject: [PATCH 13/15] refactor(pubsub): type ExtensionsState misbehaviour callback as Callable[[ID], None] --- libp2p/pubsub/extensions.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/libp2p/pubsub/extensions.py b/libp2p/pubsub/extensions.py index abb397a29..d41f591b7 100644 --- a/libp2p/pubsub/extensions.py +++ b/libp2p/pubsub/extensions.py @@ -16,6 +16,9 @@ from __future__ import annotations +from collections.abc import ( + Callable, +) from dataclasses import ( dataclass, field, @@ -32,6 +35,8 @@ logger = logging.getLogger("libp2p.pubsub.extensions") +ReportMisbehaviour = Callable[[ID], None] + @dataclass class PeerExtensions: @@ -130,10 +135,11 @@ class ExtensionsState: _sent_extensions: set[ID] = field(default_factory=set, init=False, repr=False) # Optional callback invoked when a peer sends a duplicate extensions message. 
- # Signature: report_misbehaviour(peer_id: ID) -> None - _report_misbehaviour: object = field(default=None, init=False, repr=False) + _report_misbehaviour: ReportMisbehaviour | None = field( + default=None, init=False, repr=False + ) - def set_report_misbehaviour(self, callback: object) -> None: + def set_report_misbehaviour(self, callback: ReportMisbehaviour | None) -> None: """ Register the callback that penalises misbehaving peers. @@ -229,8 +235,8 @@ def handle_rpc(self, rpc: rpc_pb2.RPC, peer_id: ID) -> None: "this is a protocol violation (GossipSub v1.3 spec rule 2).", peer_id, ) - if callable(self._report_misbehaviour): - self._report_misbehaviour(peer_id) # type: ignore[operator] + if self._report_misbehaviour is not None: + self._report_misbehaviour(peer_id) # ------------------------------------------------------------------ # Peer lifecycle From c71c735c5267c68c715c1b91afeb777ba66aadda Mon Sep 17 00:00:00 2001 From: Soham Bhoir <81645360+Winter-Soren@users.noreply.github.com> Date: Tue, 31 Mar 2026 15:14:27 +0530 Subject: [PATCH 14/15] Update docs/gossipsub-1.3.rst Co-authored-by: Paul Robinson <5199899+pacrob@users.noreply.github.com> --- docs/gossipsub-1.3.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/gossipsub-1.3.rst b/docs/gossipsub-1.3.rst index ff94b6234..3097bd7c7 100644 --- a/docs/gossipsub-1.3.rst +++ b/docs/gossipsub-1.3.rst @@ -38,7 +38,7 @@ Usage Example from libp2p import new_node from libp2p.pubsub.gossipsub import GossipSub - from libp2p.pubsub.pubsub import PubSub + from libp2p.pubsub.pubsub import Pubsub # Create node with GossipSub v1.3 node = await new_node() From 1ebcba97a96476c38a4de622eaf026846a4441e1 Mon Sep 17 00:00:00 2001 From: Winter-Soren Date: Tue, 31 Mar 2026 15:31:58 +0530 Subject: [PATCH 15/15] docs: fix gossipsub-1.3 topic observation snippet and clarify it as API usage --- docs/gossipsub-1.3.rst | 33 +++++++++++++++++++++------------ libp2p/pubsub/gossipsub.py | 4 ---- 2 files changed, 21 
insertions(+), 16 deletions(-) diff --git a/docs/gossipsub-1.3.rst b/docs/gossipsub-1.3.rst index 3097bd7c7..547f71ece 100644 --- a/docs/gossipsub-1.3.rst +++ b/docs/gossipsub-1.3.rst @@ -31,23 +31,32 @@ Lifecycle 3. **Stop observing**: Call ``stop_observing_topic(topic)`` to send UNOBSERVE control messages and stop receiving IHAVE notifications for that topic. -Usage Example -~~~~~~~~~~~~~ +API Usage Snippet +~~~~~~~~~~~~~~~~~ + +The snippet below demonstrates the Topic Observation API calls. It is not a +complete runnable program (host setup, service lifecycle, and peer wiring are +omitted for brevity). For a runnable end-to-end example, see +:doc:`examples.pubsub`. .. code-block:: python - from libp2p import new_node - from libp2p.pubsub.gossipsub import GossipSub + from libp2p import new_host + from libp2p.pubsub.gossipsub import PROTOCOL_ID_V13, GossipSub from libp2p.pubsub.pubsub import Pubsub - # Create node with GossipSub v1.3 - node = await new_node() - gossipsub = GossipSub() # Default config includes v1.3 protocols - pubsub = PubSub(gossipsub) - await node.start() - pubsub.set_pubsub(node) - - # Start observing a topic (no subscription; IHAVE-only notifications) + # Create host and Pubsub with a v1.3-capable GossipSub router. + host = new_host() + gossipsub = GossipSub( + protocols=[PROTOCOL_ID_V13], + degree=6, + degree_low=4, + degree_high=12, + ) + pubsub = Pubsub(host, gossipsub) + + # Start observing a topic (IHAVE-only presence notifications). + # In practice, call this once Pubsub/GossipSub services are running. await gossipsub.start_observing_topic("my-topic") # ... later, when done ... 
diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 6f4d6db02..4a24afede 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -2326,10 +2326,6 @@ def _report_extensions_misbehaviour(self, peer_id: ID) -> None: peer_id, ) - # ------------------------------------------------------------------ # - # GossipSub v1.3 – TODO for contributors - # ------------------------------------------------------------------ # - async def start_observing_topic(self, topic: str) -> None: """ Start observing *topic* by sending OBSERVE to all in-topic v1.3 peers