From e5f99c6aab1f53dfbc3937d2c6664297046e0354 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Sat, 13 Sep 2025 14:20:26 +0200 Subject: [PATCH 1/8] [194] Change return True/False pattern to try/catch pattern --- libp2p/host/basic_host.py | 14 +++++++- tests/core/host/test_basic_host.py | 57 ++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/libp2p/host/basic_host.py b/libp2p/host/basic_host.py index e370a3de1..89cdc1425 100644 --- a/libp2p/host/basic_host.py +++ b/libp2p/host/basic_host.py @@ -35,6 +35,7 @@ get_default_protocols, ) from libp2p.host.exceptions import ( + HostException, StreamFailure, ) from libp2p.peer.id import ( @@ -206,8 +207,19 @@ def set_stream_handler( :param protocol_id: protocol id used on stream :param stream_handler: a stream handler function + :raises HostException: if setting the stream handler fails """ - self.multiselect.add_handler(protocol_id, stream_handler) + try: + if not protocol_id: + raise HostException("Protocol ID cannot be empty") + if not stream_handler: + raise HostException("Stream handler cannot be None") + + self.multiselect.add_handler(protocol_id, stream_handler) + except HostException: + raise + except Exception as e: + raise HostException(f"Failed to set stream handler: {e}") from e async def new_stream( self, diff --git a/tests/core/host/test_basic_host.py b/tests/core/host/test_basic_host.py index 635f28632..92107a5ba 100644 --- a/tests/core/host/test_basic_host.py +++ b/tests/core/host/test_basic_host.py @@ -18,6 +18,7 @@ get_default_protocols, ) from libp2p.host.exceptions import ( + HostException, StreamFailure, ) @@ -59,3 +60,59 @@ async def fake_negotiate(comm, timeout): # Ensure reset was called since negotiation failed net_stream.reset.assert_awaited() + + +def test_set_stream_handler_success(): + """Test successful stream handler setting.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + host.set_stream_handler("/test/protocol", mock_handler) + + assert "/test/protocol" in host.multiselect.handlers + assert host.multiselect.handlers["/test/protocol"] == mock_handler + + +def test_set_stream_handler_empty_protocol(): + """Test set_stream_handler raises exception when protocol_id is empty.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + with pytest.raises(HostException, match="Protocol ID cannot be empty"): + host.set_stream_handler("", mock_handler) + + +def test_set_stream_handler_none_handler(): + """Test set_stream_handler raises exception when stream_handler is None.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + with pytest.raises(HostException, match="Stream handler cannot be None"): + host.set_stream_handler("/test/protocol", None) + + +def test_set_stream_handler_exception_handling(): + """Test set_stream_handler properly handles exceptions from multiselect.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + original_add_handler = host.multiselect.add_handler + host.multiselect.add_handler = MagicMock(side_effect=RuntimeError("Test error")) + + with pytest.raises(HostException, match="Failed to set stream handler"): + host.set_stream_handler("/test/protocol", mock_handler) + + host.multiselect.add_handler = original_add_handler From 
746d0a46d5094fda4846cdf4c9bf81e03b76afc7 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Sat, 13 Sep 2025 14:27:47 +0200 Subject: [PATCH 2/8] fix: update multiaddr dependency to fix py-cid build issue - Update multiaddr dependency from 3ea7f866fda9268ee92506edf9d8e975274bf941 to db8124e2321f316d3b7d2733c7df11d6ad9c03e6 - This fixes the build failure caused by missing py-cid branch 'acul71/new-setup' - Resolves Read the Docs build failure in PR #923 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ab4824abf..b06d639cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ dependencies = [ "grpcio>=1.41.0", "lru-dict>=1.1.6", # "multiaddr (>=0.0.9,<0.0.10)", - "multiaddr @ git+https://github.com/multiformats/py-multiaddr.git@3ea7f866fda9268ee92506edf9d8e975274bf941", + "multiaddr @ git+https://github.com/multiformats/py-multiaddr.git@db8124e2321f316d3b7d2733c7df11d6ad9c03e6", "mypy-protobuf>=3.0.0", "noiseprotocol>=0.3.0", "protobuf>=4.25.0,<5.0.0", From 33e7c847ac9c885186a6e90784f206d022314d95 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Sun, 14 Sep 2025 16:29:36 +0200 Subject: [PATCH 3/8] Resolve review comments: replace boolean returns with exceptions --- libp2p/abc.py | 5 ++- libp2p/host/autonat/autonat.py | 25 +++++++------ .../identity/identify_push/identify_push.py | 12 ++++--- libp2p/stream_muxer/mplex/mplex_stream.py | 35 ++++++++++++------- libp2p/stream_muxer/yamux/yamux.py | 8 ++--- libp2p/transport/quic/stream.py | 8 ++--- libp2p/transport/tcp/tcp.py | 20 ++++++----- 7 files changed, 65 insertions(+), 48 deletions(-) diff --git a/libp2p/abc.py b/libp2p/abc.py index 964c74546..02e0488e2 100644 --- a/libp2p/abc.py +++ b/libp2p/abc.py @@ -253,13 +253,12 @@ async def reset(self) -> None: """ @abstractmethod - def set_deadline(self, ttl: int) -> bool: + def set_deadline(self, ttl: int) -> None: """ Set a deadline for the stream. :param ttl: Time-to-live for the stream in seconds. - :return: True if the deadline was set successfully, - otherwise False. + :raises MuxedStreamError: if setting the deadline fails. """ @abstractmethod diff --git a/libp2p/host/autonat/autonat.py b/libp2p/host/autonat/autonat.py index ae4663f12..b16f3b22b 100644 --- a/libp2p/host/autonat/autonat.py +++ b/libp2p/host/autonat/autonat.py @@ -3,6 +3,9 @@ from libp2p.custom_types import ( TProtocol, ) +from libp2p.host.exceptions import ( + HostException, +) from libp2p.host.autonat.pb.autonat_pb2 import ( DialResponse, Message, @@ -154,7 +157,11 @@ async def _handle_dial(self, message: Message) -> Message: if peer_id in self.dial_results: success = self.dial_results[peer_id] else: - success = await self._try_dial(peer_id) + try: + await self._try_dial(peer_id) + success = True + except HostException: + success = False self.dial_results[peer_id] = success peer_info = PeerInfo() @@ -166,7 +173,7 @@ async def _handle_dial(self, message: Message) -> Message: response.dial_response.CopyFrom(dial_response) return response - async def _try_dial(self, peer_id: ID) -> bool: + async def _try_dial(self, peer_id: ID) -> None: """ Attempt to establish a connection with a peer. Parameters ---------- peer_id : ID The identifier of the peer to attempt to dial. - Returns - ------- - bool - True if the connection was successfully established, - False if the connection attempt failed. + Raises + ------ + HostException + If the connection attempt failed. 
""" try: stream = await self.host.new_stream(peer_id, [AUTONAT_PROTOCOL_ID]) await stream.close() - return True - except Exception: - return False + except Exception as e: + raise HostException(f"Failed to dial peer {peer_id}: {e}") from e def get_status(self) -> int: """ diff --git a/libp2p/identity/identify_push/identify_push.py b/libp2p/identity/identify_push/identify_push.py index 5b23851b1..91f331308 100644 --- a/libp2p/identity/identify_push/identify_push.py +++ b/libp2p/identity/identify_push/identify_push.py @@ -20,6 +20,9 @@ from libp2p.network.stream.exceptions import ( StreamClosed, ) +from libp2p.host.exceptions import ( + HostException, +) from libp2p.peer.envelope import consume_envelope from libp2p.peer.id import ( ID, @@ -172,7 +175,7 @@ async def push_identify_to_peer( observed_multiaddr: Multiaddr | None = None, limit: trio.Semaphore = trio.Semaphore(CONCURRENCY_LIMIT), use_varint_format: bool = True, -) -> bool: +) -> None: """ Push an identify message to a specific peer. @@ -186,8 +189,8 @@ async def push_identify_to_peer( limit: Semaphore for concurrency control. use_varint_format: True=length-prefixed, False=raw protobuf. - Returns: - bool: True if the push was successful, False otherwise. + Raises: + HostException: If the identify push fails. """ async with limit: @@ -224,10 +227,9 @@ async def push_identify_to_peer( await stream.close() logger.debug("Successfully pushed identify to peer %s", peer_id) - return True except Exception as e: logger.error("Error pushing identify to peer %s: %s", peer_id, e) - return False + raise HostException(f"Failed to push identify to peer {peer_id}: {e}") from e async def push_identify_to_peers( diff --git a/libp2p/stream_muxer/mplex/mplex_stream.py b/libp2p/stream_muxer/mplex/mplex_stream.py index e8d0561d4..19bc16750 100644 --- a/libp2p/stream_muxer/mplex/mplex_stream.py +++ b/libp2p/stream_muxer/mplex/mplex_stream.py @@ -309,33 +309,42 @@ async def reset(self) -> None: self.muxed_conn.streams.pop(self.stream_id, None) # TODO deadline not in use - def set_deadline(self, ttl: int) -> bool: + def set_deadline(self, ttl: int) -> None: """ Set deadline for muxed stream. - :return: True if successful + :param ttl: Time-to-live for the stream in seconds + :raises MuxedStreamError: if setting the deadline fails """ - self.read_deadline = ttl - self.write_deadline = ttl - return True + try: + self.read_deadline = ttl + self.write_deadline = ttl + except Exception as e: + raise MuxedStreamError(f"Failed to set deadline: {e}") from e - def set_read_deadline(self, ttl: int) -> bool: + def set_read_deadline(self, ttl: int) -> None: """ Set read deadline for muxed stream. - :return: True if successful + :param ttl: Time-to-live for the read deadline in seconds + :raises MuxedStreamError: if setting the read deadline fails """ - self.read_deadline = ttl - return True + try: + self.read_deadline = ttl + except Exception as e: + raise MuxedStreamError(f"Failed to set read deadline: {e}") from e - def set_write_deadline(self, ttl: int) -> bool: + def set_write_deadline(self, ttl: int) -> None: """ Set write deadline for muxed stream. 
- :return: True if successful + :param ttl: Time-to-live for the write deadline in seconds + :raises MuxedStreamError: if setting the write deadline fails """ - self.write_deadline = ttl - return True + try: + self.write_deadline = ttl + except Exception as e: + raise MuxedStreamError(f"Failed to set write deadline: {e}") from e def get_remote_address(self) -> tuple[str, int] | None: """Delegate to the parent Mplex connection.""" diff --git a/libp2p/stream_muxer/yamux/yamux.py b/libp2p/stream_muxer/yamux/yamux.py index b2711e1a8..8b029fba0 100644 --- a/libp2p/stream_muxer/yamux/yamux.py +++ b/libp2p/stream_muxer/yamux/yamux.py @@ -282,15 +282,15 @@ async def reset(self) -> None: self.closed = True self.reset_received = True # Mark as reset - def set_deadline(self, ttl: int) -> bool: + def set_deadline(self, ttl: int) -> None: """ Set a deadline for the stream. Yamux does not support deadlines natively, - so this method always returns False to indicate the operation is unsupported. + so this method raises an exception to indicate the operation is unsupported. :param ttl: Time-to-live in seconds (ignored). - :return: False, as deadlines are not supported. + :raises NotImplementedError: as deadlines are not supported in Yamux. """ - raise NotImplementedError("Yamux does not support setting read deadlines") + raise NotImplementedError("Yamux does not support setting deadlines") def get_remote_address(self) -> tuple[str, int] | None: """ diff --git a/libp2p/transport/quic/stream.py b/libp2p/transport/quic/stream.py index dac8925ec..5834b58ed 100644 --- a/libp2p/transport/quic/stream.py +++ b/libp2p/transport/quic/stream.py @@ -632,15 +632,15 @@ async def __aexit__( logger.debug("Exiting the context and closing the stream") await self.close() - def set_deadline(self, ttl: int) -> bool: + def set_deadline(self, ttl: int) -> None: """ Set a deadline for the stream. QUIC does not support deadlines natively, - so this method always returns False to indicate the operation is unsupported. + so this method raises an exception to indicate the operation is unsupported. :param ttl: Time-to-live in seconds (ignored). - :return: False, as deadlines are not supported. + :raises NotImplementedError: as deadlines are not supported in QUIC. """ - raise NotImplementedError("QUIC does not support setting read deadlines") + raise NotImplementedError("QUIC does not support setting deadlines") # String representation for debugging diff --git a/libp2p/transport/tcp/tcp.py b/libp2p/transport/tcp/tcp.py index 1598ea42a..80ff51c75 100644 --- a/libp2p/transport/tcp/tcp.py +++ b/libp2p/transport/tcp/tcp.py @@ -42,12 +42,12 @@ def __init__(self, handler_function: THandler) -> None: self.handler = handler_function # TODO: Get rid of `nursery`? - async def listen(self, maddr: Multiaddr, nursery: trio.Nursery) -> bool: + async def listen(self, maddr: Multiaddr, nursery: trio.Nursery) -> None: """ Put listener in listening mode and wait for incoming connections. 
:param maddr: maddr of peer - :return: return True if successful + :raises OpenConnectionError: if listening fails """ async def serve_tcp( @@ -76,17 +76,19 @@ async def handler(stream: trio.SocketStream) -> None: tcp_port_str = maddr.value_for_protocol("tcp") if tcp_port_str is None: - logger.error(f"Cannot listen: TCP port is missing in multiaddress {maddr}") - return False + error_msg = f"Cannot listen: TCP port is missing in multiaddress {maddr}" + logger.error(error_msg) + raise OpenConnectionError(error_msg) try: tcp_port = int(tcp_port_str) except ValueError: - logger.error( + error_msg = ( f"Cannot listen: Invalid TCP port '{tcp_port_str}' " f"in multiaddress {maddr}" ) - return False + logger.error(error_msg) + raise OpenConnectionError(error_msg) ip4_host_str = maddr.value_for_protocol("ip4") # For trio.serve_tcp, ip4_host_str (as host argument) can be None, @@ -102,16 +104,16 @@ async def handler(stream: trio.SocketStream) -> None: if started_listeners is None: # This implies that task_status.started() was not called within serve_tcp, # likely because trio.serve_tcp itself failed to start (e.g., port in use). - logger.error( + error_msg = ( f"Failed to start TCP listener for {maddr}: " f"`nursery.start` returned None. " "This might be due to issues like the port already " "being in use or invalid host." ) - return False + logger.error(error_msg) + raise OpenConnectionError(error_msg) self.listeners.extend(started_listeners) - return True def get_addrs(self) -> tuple[Multiaddr, ...]: """ From f0c654c7769e135071de94246bd6a0e61aaeee5d Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Sun, 14 Sep 2025 18:08:09 +0200 Subject: [PATCH 4/8] test cases passed --- .../identity/identify_push/identify_push.py | 28 ++++++++- tests/core/examples/test_examples.py | 7 ++- tests/core/host/test_autonat.py | 14 +++-- .../identify_push/test_identify_push.py | 18 ++++-- .../test_identify_push_integration.py | 62 ++++++++++++------- tests/core/stream_muxer/test_yamux.py | 2 +- 6 files changed, 91 insertions(+), 40 deletions(-) diff --git a/libp2p/identity/identify_push/identify_push.py b/libp2p/identity/identify_push/identify_push.py index 91f331308..b6024a33b 100644 --- a/libp2p/identity/identify_push/identify_push.py +++ b/libp2p/identity/identify_push/identify_push.py @@ -232,6 +232,32 @@ async def push_identify_to_peer( raise HostException(f"Failed to push identify to peer {peer_id}: {e}") from e +async def _safe_push_identify_to_peer( + host: IHost, + peer_id: ID, + observed_multiaddr: Multiaddr | None = None, + limit: trio.Semaphore = trio.Semaphore(CONCURRENCY_LIMIT), + use_varint_format: bool = True, +) -> None: + """ + Safely push identify information to a specific peer, catching and logging exceptions. + + This is a wrapper around push_identify_to_peer that catches exceptions and logs them + instead of letting them propagate, which is useful when calling from a nursery. + + Args: + host: The libp2p host. + peer_id: The peer ID to push to. + observed_multiaddr: The observed multiaddress (optional). + limit: Semaphore for concurrency control. + use_varint_format: True=length-prefixed, False=raw protobuf. 
+ """ + try: + await push_identify_to_peer(host, peer_id, observed_multiaddr, limit, use_varint_format) + except Exception as e: + logger.debug("Failed to push identify to peer %s: %s", peer_id, e) + + async def push_identify_to_peers( host: IHost, peer_ids: set[ID] | None = None, @@ -262,7 +288,7 @@ async def push_identify_to_peers( async with trio.open_nursery() as nursery: for peer_id in peer_ids: nursery.start_soon( - push_identify_to_peer, + _safe_push_identify_to_peer, host, peer_id, observed_multiaddr, diff --git a/tests/core/examples/test_examples.py b/tests/core/examples/test_examples.py index d60327b68..be82c18a0 100644 --- a/tests/core/examples/test_examples.py +++ b/tests/core/examples/test_examples.py @@ -281,8 +281,11 @@ async def identify_push_demo(host_a, host_b): logger.debug("Host A protocols before push: %s", host_a_protocols) # Push identify information from host_a to host_b - success = await push_identify_to_peer(host_a, host_b.get_id()) - assert success is True + try: + await push_identify_to_peer(host_a, host_b.get_id()) + # If we get here, the push was successful + except Exception as e: + pytest.fail(f"push_identify_to_peer failed: {e}") # Add a small delay to allow processing await trio.sleep(0.1) diff --git a/tests/core/host/test_autonat.py b/tests/core/host/test_autonat.py index 4c6dbacab..43839499d 100644 --- a/tests/core/host/test_autonat.py +++ b/tests/core/host/test_autonat.py @@ -10,6 +10,9 @@ AutoNATService, AutoNATStatus, ) +from libp2p.host.exceptions import ( + HostException, +) from libp2p.host.autonat.pb.autonat_pb2 import ( DialRequest, DialResponse, @@ -108,9 +111,8 @@ async def test_try_dial(): mock_stream = AsyncMock(spec=NetStream) mock_new_stream.return_value = mock_stream - result = await service._try_dial(peer_id) - - assert result is True + # Should not raise an exception for successful dial + await service._try_dial(peer_id) mock_new_stream.assert_called_once_with(peer_id, [AUTONAT_PROTOCOL_ID]) mock_stream.close.assert_called_once() @@ -120,9 +122,9 @@ async def test_try_dial(): ) as mock_new_stream: mock_new_stream.side_effect = Exception("Connection failed") - result = await service._try_dial(peer_id) - - assert result is False + # Should raise HostException for failed dial + with pytest.raises(HostException, match="Failed to dial peer"): + await service._try_dial(peer_id) mock_new_stream.assert_called_once_with(peer_id, [AUTONAT_PROTOCOL_ID]) diff --git a/tests/core/identity/identify_push/test_identify_push.py b/tests/core/identity/identify_push/test_identify_push.py index a1e2e472f..f66978926 100644 --- a/tests/core/identity/identify_push/test_identify_push.py +++ b/tests/core/identity/identify_push/test_identify_push.py @@ -619,8 +619,11 @@ async def test_identify_push_default_varint_format(security_protocol): host_b.set_stream_handler(ID_PUSH, identify_push_handler_for(host_b)) # Push identify information from host_a to host_b using default settings - success = await push_identify_to_peer(host_a, host_b.get_id()) - assert success, "Identify push should succeed with default varint format" + try: + await push_identify_to_peer(host_a, host_b.get_id()) + # If we get here, the push was successful + except Exception as e: + pytest.fail(f"Identify push should succeed with default varint format: {e}") # Wait a bit for the push to complete await trio.sleep(0.1) @@ -669,10 +672,13 @@ async def test_identify_push_legacy_raw_format(security_protocol): ) # Push identify information from host_a to host_b using legacy format - success = await 
push_identify_to_peer( - host_a, host_b.get_id(), use_varint_format=False - ) - assert success, "Identify push should succeed with legacy raw format" + try: + await push_identify_to_peer( + host_a, host_b.get_id(), use_varint_format=False + ) + # If we get here, the push was successful + except Exception as e: + pytest.fail(f"Identify push should succeed with legacy raw format: {e}") # Wait a bit for the push to complete await trio.sleep(0.1) diff --git a/tests/core/identity/identify_push/test_identify_push_integration.py b/tests/core/identity/identify_push/test_identify_push_integration.py index 9ee38b10c..8bda72fa5 100644 --- a/tests/core/identity/identify_push/test_identify_push_integration.py +++ b/tests/core/identity/identify_push/test_identify_push_integration.py @@ -150,13 +150,14 @@ async def wrapped_handler(stream): host_a.set_stream_handler(ID_PUSH, wrapped_handler) # Host B pushes with varint format (should fail gracefully) - success = await push_identify_to_peer( - host_b, host_a.get_id(), use_varint_format=True - ) - # This should fail due to format mismatch - # Note: The format detection might be more robust than expected - # so we just check that the operation completes - assert isinstance(success, bool) + try: + await push_identify_to_peer( + host_b, host_a.get_id(), use_varint_format=True + ) + # If we get here, the operation succeeded (unexpected but acceptable) + except Exception: + # This is expected due to format mismatch, which is fine + pass @pytest.mark.trio @@ -182,13 +183,14 @@ async def wrapped_handler(stream): host_a.set_stream_handler(ID_PUSH, wrapped_handler) # Host B pushes with raw format (should fail gracefully) - success = await push_identify_to_peer( - host_b, host_a.get_id(), use_varint_format=False - ) - # This should fail due to format mismatch - # Note: The format detection might be more robust than expected - # so we just check that the operation completes - assert isinstance(success, bool) + try: + await push_identify_to_peer( + host_b, host_a.get_id(), use_varint_format=False + ) + # If we get here, the operation succeeded (unexpected but acceptable) + except Exception: + # This is expected due to format mismatch, which is fine + pass @pytest.mark.trio @@ -280,10 +282,13 @@ async def dummy_handler(stream): ) # Push identify information from host_b to host_a - success = await push_identify_to_peer( - host_b, host_a.get_id(), use_varint_format=True - ) - assert success + try: + await push_identify_to_peer( + host_b, host_a.get_id(), use_varint_format=True + ) + # If we get here, the push was successful + except Exception as e: + pytest.fail(f"Identify push should succeed with large message: {e}") # Wait a bit for the push to complete await trio.sleep(0.1) @@ -359,8 +364,11 @@ async def dummy_handler(stream): results = [] async def push_identify(): - result = await push_identify_to_peer(host_b, host_a.get_id()) - results.append(result) + try: + await push_identify_to_peer(host_b, host_a.get_id()) + results.append(True) # Success + except Exception: + results.append(False) # Failure # Run multiple concurrent pushes using nursery async with trio.open_nursery() as nursery: @@ -400,8 +408,11 @@ async def dummy_handler(stream): host_a.set_stream_handler(ID_PUSH, identify_push_handler_for(host_a)) # Push identify information from host_b to host_a - success = await push_identify_to_peer(host_b, host_a.get_id()) - assert success + try: + await push_identify_to_peer(host_b, host_a.get_id()) + # If we get here, the push was successful + except Exception as e: + 
pytest.fail(f"Identify push should succeed with stream handling: {e}") # Wait a bit for the push to complete await trio.sleep(0.1) @@ -434,8 +445,11 @@ async def error_handler(stream): host_a.set_stream_handler(ID_PUSH, error_handler) # Push should complete (message sent) but handler should fail gracefully - success = await push_identify_to_peer(host_b, host_a.get_id()) - assert success # The push operation itself succeeds (message sent) + try: + await push_identify_to_peer(host_b, host_a.get_id()) + # If we get here, the push was successful + except Exception as e: + pytest.fail(f"Identify push should succeed even with error handler: {e}") # Wait a bit for the handler to process await trio.sleep(0.1) diff --git a/tests/core/stream_muxer/test_yamux.py b/tests/core/stream_muxer/test_yamux.py index 444288518..77de47c31 100644 --- a/tests/core/stream_muxer/test_yamux.py +++ b/tests/core/stream_muxer/test_yamux.py @@ -258,7 +258,7 @@ async def test_yamux_deadlines_raise_not_implemented(yamux_pair): stream = await client_yamux.open_stream() with trio.move_on_after(2): with pytest.raises( - NotImplementedError, match="Yamux does not support setting read deadlines" + NotImplementedError, match="Yamux does not support setting deadlines" ): stream.set_deadline(60) logging.debug("test_yamux_deadlines_raise_not_implemented complete") From 168f287c6217eb2df2caf68bd44ccfa3e1bd3141 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Mon, 15 Sep 2025 21:19:23 +0200 Subject: [PATCH 5/8] test fixes --- tests/core/kad_dht/test_kad_dht.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tests/core/kad_dht/test_kad_dht.py b/tests/core/kad_dht/test_kad_dht.py index 5bf4f3e83..55fa5fdcc 100644 --- a/tests/core/kad_dht/test_kad_dht.py +++ b/tests/core/kad_dht/test_kad_dht.py @@ -75,6 +75,26 @@ async def dht_pair(security_protocol): "After bootstrap: Node B peers: %s", dht_b.routing_table.get_peer_ids() ) + # Wait for peer records to be stored (DHT operations complete) + # This ensures that FIND_NODE operations have completed and peer records are stored + # Use a more conservative approach to avoid CI issues + + try: + # Simple wait without complex polling to avoid resource issues + await trio.sleep(0.2) + + # Check if peer records are available, but don't wait if they're not + peer_record_a = dht_a.host.get_peerstore().get_peer_record(dht_b.host.get_id()) + peer_record_b = dht_b.host.get_peerstore().get_peer_record(dht_a.host.get_id()) + + if peer_record_a is not None and peer_record_b is not None: + logger.debug("Peer records are ready") + else: + logger.debug("Peer records not yet ready, tests may need to handle this") + + except Exception as e: + logger.warning("Error while checking peer records: %s, continuing anyway", e) + # Return the DHT pair yield (dht_a, dht_b) From 0ab23fa095236751104f85904c66c6bcef8fce98 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Thu, 18 Sep 2025 18:55:35 +0200 Subject: [PATCH 6/8] test cases added and newsfragment --- newsfragments/194.bugfix.rst | 7 + tests/core/host/test_basic_host.py | 94 +++++++++++ .../identify_push/test_identify_push.py | 3 + .../test_identify_push_exception_handling.py | 158 ++++++++++++++++++ tests/core/stream_muxer/test_mplex_stream.py | 129 ++++++++++++++ 5 files changed, 391 insertions(+) create mode 100644 newsfragments/194.bugfix.rst create mode 100644 tests/core/identity/identify_push/test_identify_push_exception_handling.py diff --git a/newsfragments/194.bugfix.rst b/newsfragments/194.bugfix.rst new file mode 100644 
index 000000000..ae3309c64 --- /dev/null +++ b/newsfragments/194.bugfix.rst @@ -0,0 +1,7 @@ +Replaced return True/False pattern with try/catch pattern for better error handling. + +- Updated set_stream_handler method in BasicHost to raise HostException instead of returning boolean +- Enhanced exception handling in mplex stream deadline methods with proper error chaining +- Improved identify/push protocol error handling with comprehensive exception management +- Added proper exception documentation and error messages throughout affected methods + diff --git a/tests/core/host/test_basic_host.py b/tests/core/host/test_basic_host.py index 92107a5ba..5ce78932a 100644 --- a/tests/core/host/test_basic_host.py +++ b/tests/core/host/test_basic_host.py @@ -116,3 +116,97 @@ async def mock_handler(stream): host.set_stream_handler("/test/protocol", mock_handler) host.multiselect.add_handler = original_add_handler + + +def test_set_stream_handler_multiple_exceptions(): + """Test set_stream_handler handles different types of exceptions.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + # Test with ValueError + original_add_handler = host.multiselect.add_handler + host.multiselect.add_handler = MagicMock(side_effect=ValueError("Invalid value")) + + with pytest.raises(HostException, match="Failed to set stream handler"): + host.set_stream_handler("/test/protocol", mock_handler) + + # Test with KeyError + host.multiselect.add_handler = MagicMock(side_effect=KeyError("Missing key")) + + with pytest.raises(HostException, match="Failed to set stream handler"): + host.set_stream_handler("/test/protocol", mock_handler) + + host.multiselect.add_handler = original_add_handler + + +def test_set_stream_handler_preserves_exception_chain(): + """Test that set_stream_handler preserves the original exception chain.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + original_add_handler = host.multiselect.add_handler + original_error = RuntimeError("Original error") + host.multiselect.add_handler = MagicMock(side_effect=original_error) + + with pytest.raises(HostException) as exc_info: + host.set_stream_handler("/test/protocol", mock_handler) + + # Check that the original exception is preserved in the chain + assert exc_info.value.__cause__ is original_error + assert "Failed to set stream handler" in str(exc_info.value) + + host.multiselect.add_handler = original_add_handler + + +def test_set_stream_handler_success_with_valid_inputs(): + """Test set_stream_handler succeeds with various valid protocol IDs.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + # Test with different valid protocol IDs + valid_protocols = [ + "/test/protocol", + "/ipfs/id/1.0.0", + "/libp2p/autonat/1.0.0", + "/multistream/1.0.0", + "/test/protocol/with/version/1.0.0" + ] + + for protocol_id in valid_protocols: + host.set_stream_handler(protocol_id, mock_handler) + assert protocol_id in host.multiselect.handlers + assert host.multiselect.handlers[protocol_id] == mock_handler + + +def test_set_stream_handler_edge_cases(): + """Test set_stream_handler with edge case inputs.""" + key_pair = create_new_key_pair() + swarm = new_swarm(key_pair) + host = BasicHost(swarm) + + async def mock_handler(stream): + pass + + # Test with whitespace-only protocol ID + with 
pytest.raises(HostException, match="Protocol ID cannot be empty"): + host.set_stream_handler(" ", mock_handler) + + # Test with None protocol ID + with pytest.raises(HostException, match="Protocol ID cannot be empty"): + host.set_stream_handler(None, mock_handler) + + # Test with empty string protocol ID + with pytest.raises(HostException, match="Protocol ID cannot be empty"): + host.set_stream_handler("", mock_handler) diff --git a/tests/core/identity/identify_push/test_identify_push.py b/tests/core/identity/identify_push/test_identify_push.py index f66978926..6ba6872ab 100644 --- a/tests/core/identity/identify_push/test_identify_push.py +++ b/tests/core/identity/identify_push/test_identify_push.py @@ -27,6 +27,9 @@ push_identify_to_peer, push_identify_to_peers, ) +from libp2p.host.exceptions import ( + HostException, +) from libp2p.peer.peerinfo import ( info_from_p2p_addr, ) diff --git a/tests/core/identity/identify_push/test_identify_push_exception_handling.py b/tests/core/identity/identify_push/test_identify_push_exception_handling.py new file mode 100644 index 000000000..ec977ba4f --- /dev/null +++ b/tests/core/identity/identify_push/test_identify_push_exception_handling.py @@ -0,0 +1,158 @@ +import pytest +import trio + +from libp2p import ( + new_host, +) +from libp2p.crypto.secp256k1 import ( + create_new_key_pair, +) +from libp2p.identity.identify.pb.identify_pb2 import ( + Identify, +) +from libp2p.identity.identify_push.identify_push import ( + _update_peerstore_from_identify, + push_identify_to_peer, + push_identify_to_peers, +) +from libp2p.host.exceptions import ( + HostException, +) +from tests.utils.factories import ( + host_pair_factory, +) +from tests.utils.utils import ( + run_host_forever, + wait_until_listening, +) + + +@pytest.mark.trio +async def test_push_identify_to_peer_exception_handling(): + """Test that push_identify_to_peer properly handles exceptions.""" + key_pair = create_new_key_pair() + host_a, host_b = host_pair_factory(key_pair) + + # Start both hosts + async with run_host_forever(host_a), run_host_forever(host_b): + await wait_until_listening(host_a) + await wait_until_listening(host_b) + + # Connect the hosts + peer_info = host_b.get_addrs()[0] + await host_a.connect(peer_info) + + # Mock new_stream to raise an exception + original_new_stream = host_a.new_stream + + async def mock_new_stream(*args, **kwargs): + raise RuntimeError("Mock stream creation error") + + host_a.new_stream = mock_new_stream + + # Test that push_identify_to_peer raises HostException + with pytest.raises(HostException, match="Failed to push identify to peer"): + await push_identify_to_peer(host_a, host_b.get_id()) + + # Restore original method + host_a.new_stream = original_new_stream + + +@pytest.mark.trio +async def test_push_identify_to_peer_preserves_exception_chain(): + """Test that push_identify_to_peer preserves the original exception chain.""" + key_pair = create_new_key_pair() + host_a, host_b = host_pair_factory(key_pair) + + # Start both hosts + async with run_host_forever(host_a), run_host_forever(host_b): + await wait_until_listening(host_a) + await wait_until_listening(host_b) + + # Connect the hosts + peer_info = host_b.get_addrs()[0] + await host_a.connect(peer_info) + + original_error = ConnectionError("Original connection error") + + # Mock new_stream to raise a specific exception + async def mock_new_stream(*args, **kwargs): + raise original_error + + host_a.new_stream = mock_new_stream + + # Test that the original exception is preserved in the chain + with 
pytest.raises(HostException) as exc_info: + await push_identify_to_peer(host_a, host_b.get_id()) + + # Check that the original exception is preserved in the chain + assert exc_info.value.__cause__ is original_error + assert "Failed to push identify to peer" in str(exc_info.value) + + +@pytest.mark.trio +async def test_update_peerstore_from_identify_exception_handling(): + """Test that _update_peerstore_from_identify handles exceptions gracefully.""" + key_pair = create_new_key_pair() + host_a, host_b = host_pair_factory(key_pair) + + # Create a mock peerstore that raises exceptions + class MockPeerstore: + def add_protocols(self, peer_id, protocols): + raise ValueError("Mock protocol error") + + def add_pubkey(self, peer_id, pubkey): + raise KeyError("Mock pubkey error") + + def add_addr(self, peer_id, addr, ttl): + raise RuntimeError("Mock addr error") + + def consume_peer_record(self, envelope, ttl): + raise ConnectionError("Mock record error") + + mock_peerstore = MockPeerstore() + + # Create an identify message with various fields + identify_msg = Identify() + identify_msg.public_key = b"mock_public_key" + identify_msg.listen_addrs.extend([b"/ip4/127.0.0.1/tcp/4001"]) + identify_msg.protocols.extend(["/test/protocol/1.0.0"]) + identify_msg.observed_addr = b"/ip4/127.0.0.1/tcp/4002" + identify_msg.signedPeerRecord = b"mock_signed_record" + + # Test that the function handles exceptions gracefully + # It should not raise any exceptions, just log errors + await _update_peerstore_from_identify(mock_peerstore, host_b.get_id(), identify_msg) + + # If we get here without exceptions, the test passes + + +@pytest.mark.trio +async def test_push_identify_to_peers_exception_handling(): + """Test that push_identify_to_peers handles exceptions gracefully.""" + key_pair = create_new_key_pair() + host_a, host_b = host_pair_factory(key_pair) + + # Start both hosts + async with run_host_forever(host_a), run_host_forever(host_b): + await wait_until_listening(host_a) + await wait_until_listening(host_b) + + # Connect the hosts + peer_info = host_b.get_addrs()[0] + await host_a.connect(peer_info) + + # Mock new_stream to raise an exception for one peer + original_new_stream = host_a.new_stream + + async def mock_new_stream(*args, **kwargs): + raise ConnectionError("Mock connection error") + + host_a.new_stream = mock_new_stream + + # Test that push_identify_to_peers handles exceptions gracefully + # It should not raise any exceptions, just log errors + await push_identify_to_peers(host_a, {host_b.get_id()}) + + # Restore original method + host_a.new_stream = original_new_stream diff --git a/tests/core/stream_muxer/test_mplex_stream.py b/tests/core/stream_muxer/test_mplex_stream.py index 1d9c22340..b556a4f6a 100644 --- a/tests/core/stream_muxer/test_mplex_stream.py +++ b/tests/core/stream_muxer/test_mplex_stream.py @@ -10,6 +10,9 @@ MplexStreamReset, MuxedConnUnavailable, ) +from libp2p.stream_muxer.exceptions import ( + MuxedStreamError, +) from libp2p.stream_muxer.mplex.mplex import ( MPLEX_MESSAGE_CHANNEL_SIZE, ) @@ -250,3 +253,129 @@ def raise_unavailable(*args, **kwargs): with pytest.raises(RuntimeError, match="Failed to send close message"): await stream_0.close() + + +@pytest.mark.trio +async def test_mplex_stream_set_deadline_success(mplex_stream_pair): + """Test successful deadline setting.""" + stream_0, _ = mplex_stream_pair + + # Test setting deadline + stream_0.set_deadline(30) + assert stream_0.read_deadline == 30 + assert stream_0.write_deadline == 30 + + +@pytest.mark.trio +async def 
test_mplex_stream_set_read_deadline_success(mplex_stream_pair): + """Test successful read deadline setting.""" + stream_0, _ = mplex_stream_pair + + # Test setting read deadline + stream_0.set_read_deadline(15) + assert stream_0.read_deadline == 15 + + +@pytest.mark.trio +async def test_mplex_stream_set_write_deadline_success(mplex_stream_pair): + """Test successful write deadline setting.""" + stream_0, _ = mplex_stream_pair + + # Test setting write deadline + stream_0.set_write_deadline(20) + assert stream_0.write_deadline == 20 + + +@pytest.mark.trio +async def test_mplex_stream_set_deadline_exception_handling(monkeypatch, mplex_stream_pair): + """Test deadline setting handles exceptions properly.""" + stream_0, _ = mplex_stream_pair + + # Mock the deadline setting to raise an exception + original_read_deadline = stream_0.read_deadline + original_write_deadline = stream_0.write_deadline + + def mock_set_deadline(ttl): + raise RuntimeError("Mock deadline error") + + # Patch the attribute assignment to raise an exception + def mock_setattr(obj, name, value): + if name in ('read_deadline', 'write_deadline'): + raise RuntimeError("Mock deadline error") + return setattr(obj, name, value) + + monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) + + with pytest.raises(MuxedStreamError, match="Failed to set deadline"): + stream_0.set_deadline(30) + + # Verify original values are preserved + assert stream_0.read_deadline == original_read_deadline + assert stream_0.write_deadline == original_write_deadline + + +@pytest.mark.trio +async def test_mplex_stream_set_read_deadline_exception_handling(monkeypatch, mplex_stream_pair): + """Test read deadline setting handles exceptions properly.""" + stream_0, _ = mplex_stream_pair + + original_read_deadline = stream_0.read_deadline + + # Patch the attribute assignment to raise an exception + def mock_setattr(obj, name, value): + if name == 'read_deadline': + raise ValueError("Mock read deadline error") + return setattr(obj, name, value) + + monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) + + with pytest.raises(MuxedStreamError, match="Failed to set read deadline"): + stream_0.set_read_deadline(15) + + # Verify original value is preserved + assert stream_0.read_deadline == original_read_deadline + + +@pytest.mark.trio +async def test_mplex_stream_set_write_deadline_exception_handling(monkeypatch, mplex_stream_pair): + """Test write deadline setting handles exceptions properly.""" + stream_0, _ = mplex_stream_pair + + original_write_deadline = stream_0.write_deadline + + # Patch the attribute assignment to raise an exception + def mock_setattr(obj, name, value): + if name == 'write_deadline': + raise KeyError("Mock write deadline error") + return setattr(obj, name, value) + + monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) + + with pytest.raises(MuxedStreamError, match="Failed to set write deadline"): + stream_0.set_write_deadline(20) + + # Verify original value is preserved + assert stream_0.write_deadline == original_write_deadline + + +@pytest.mark.trio +async def test_mplex_stream_deadline_preserves_exception_chain(monkeypatch, mplex_stream_pair): + """Test that deadline methods preserve the original exception chain.""" + stream_0, _ = mplex_stream_pair + + original_error = RuntimeError("Original deadline error") + + # Patch the attribute assignment to raise an exception + def mock_setattr(obj, name, value): + if name in ('read_deadline', 'write_deadline'): + raise original_error + return setattr(obj, name, value) + + 
monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) + + with pytest.raises(MuxedStreamError) as exc_info: + stream_0.set_deadline(30) + + # Check that the original exception is preserved in the chain + assert exc_info.value.__cause__ is original_error + assert "Failed to set deadline" in str(exc_info.value) From 3ad889ac33748d7b602d532b50c17e89b6407f8f Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Fri, 19 Sep 2025 11:40:54 +0200 Subject: [PATCH 7/8] Simplify peer-record wait in kad_dht test fixture --- tests/core/kad_dht/test_kad_dht.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/tests/core/kad_dht/test_kad_dht.py b/tests/core/kad_dht/test_kad_dht.py index 55fa5fdcc..d7d3c0793 100644 --- a/tests/core/kad_dht/test_kad_dht.py +++ b/tests/core/kad_dht/test_kad_dht.py @@ -77,23 +77,7 @@ async def dht_pair(security_protocol): # Wait for peer records to be stored (DHT operations complete) # This ensures that FIND_NODE operations have completed and peer records are stored - # Use a more conservative approach to avoid CI issues - - try: - # Simple wait without complex polling to avoid resource issues - await trio.sleep(0.2) - - # Check if peer records are available, but don't wait if they're not - peer_record_a = dht_a.host.get_peerstore().get_peer_record(dht_b.host.get_id()) - peer_record_b = dht_b.host.get_peerstore().get_peer_record(dht_a.host.get_id()) - - if peer_record_a is not None and peer_record_b is not None: - logger.debug("Peer records are ready") - else: - logger.debug("Peer records not yet ready, tests may need to handle this") - - except Exception as e: - logger.warning("Error while checking peer records: %s, continuing anyway", e) + await trio.sleep(0.3) # Return the DHT pair yield (dht_a, dht_b) From d9f97b69b0f54e98ddc02c8cdf3fd9c15bf101e7 Mon Sep 17 00:00:00 2001 From: Suchitra Swain Date: Mon, 22 Sep 2025 17:29:41 +0200 Subject: [PATCH 8/8] Fix exception-handling tests and add input validation --- libp2p/host/basic_host.py | 2 +- libp2p/stream_muxer/mplex/mplex_stream.py | 7 ++ .../test_identify_push_exception_handling.py | 88 ++++++++----------- tests/core/stream_muxer/test_mplex_stream.py | 53 +++-------- 4 files changed, 56 insertions(+), 94 deletions(-) diff --git a/libp2p/host/basic_host.py b/libp2p/host/basic_host.py index 9fb42036d..bd55fa4df 100644 --- a/libp2p/host/basic_host.py +++ b/libp2p/host/basic_host.py @@ -210,7 +210,7 @@ def set_stream_handler( :raises HostException: if setting the stream handler fails """ try: - if not protocol_id: + if not protocol_id or (isinstance(protocol_id, str) and not protocol_id.strip()): raise HostException("Protocol ID cannot be empty") if not stream_handler: raise HostException("Stream handler cannot be None") diff --git a/libp2p/stream_muxer/mplex/mplex_stream.py b/libp2p/stream_muxer/mplex/mplex_stream.py index 19bc16750..d557b00b2 100644 --- a/libp2p/stream_muxer/mplex/mplex_stream.py +++ b/libp2p/stream_muxer/mplex/mplex_stream.py @@ -14,6 +14,7 @@ ) from libp2p.stream_muxer.exceptions import ( MuxedConnUnavailable, + MuxedStreamError, ) from .constants import ( @@ -317,6 +318,8 @@ def set_deadline(self, ttl: int) -> None: :raises MuxedStreamError: if setting the deadline fails """ try: + if ttl < 0: + raise ValueError("Deadline cannot be negative") self.read_deadline = ttl self.write_deadline = ttl except Exception as e: @@ -330,6 +333,8 @@ def set_read_deadline(self, ttl: int) -> None: :raises MuxedStreamError: if setting the read deadline fails """ try: + if ttl < 0: + raise ValueError("Read deadline cannot be negative") 
self.read_deadline = ttl except Exception as e: raise MuxedStreamError(f"Failed to set read deadline: {e}") from e @@ -342,6 +347,8 @@ def set_write_deadline(self, ttl: int) -> None: :raises MuxedStreamError: if setting the write deadline fails """ try: + if ttl < 0: + raise ValueError("Write deadline cannot be negative") self.write_deadline = ttl except Exception as e: raise MuxedStreamError(f"Failed to set write deadline: {e}") from e diff --git a/tests/core/identity/identify_push/test_identify_push_exception_handling.py b/tests/core/identity/identify_push/test_identify_push_exception_handling.py index ec977ba4f..494621c79 100644 --- a/tests/core/identity/identify_push/test_identify_push_exception_handling.py +++ b/tests/core/identity/identify_push/test_identify_push_exception_handling.py @@ -18,6 +18,9 @@ from libp2p.host.exceptions import ( HostException, ) +from libp2p.peer.peerinfo import ( + PeerInfo, +) from tests.utils.factories import ( host_pair_factory, ) @@ -31,15 +34,10 @@ async def test_push_identify_to_peer_exception_handling(): """Test that push_identify_to_peer properly handles exceptions.""" key_pair = create_new_key_pair() - host_a, host_b = host_pair_factory(key_pair) - # Start both hosts - async with run_host_forever(host_a), run_host_forever(host_b): - await wait_until_listening(host_a) - await wait_until_listening(host_b) - + async with host_pair_factory() as (host_a, host_b): # Connect the hosts - peer_info = host_b.get_addrs()[0] + peer_info = PeerInfo(host_b.get_id(), host_b.get_addrs()) await host_a.connect(peer_info) # Mock new_stream to raise an exception @@ -62,15 +60,10 @@ async def mock_new_stream(*args, **kwargs): async def test_push_identify_to_peer_preserves_exception_chain(): """Test that push_identify_to_peer preserves the original exception chain.""" key_pair = create_new_key_pair() - host_a, host_b = host_pair_factory(key_pair) - # Start both hosts - async with run_host_forever(host_a), run_host_forever(host_b): - await wait_until_listening(host_a) - await wait_until_listening(host_b) - + async with host_pair_factory() as (host_a, host_b): # Connect the hosts - peer_info = host_b.get_addrs()[0] + peer_info = PeerInfo(host_b.get_id(), host_b.get_addrs()) await host_a.connect(peer_info) original_error = ConnectionError("Original connection error") @@ -94,52 +87,47 @@ async def mock_new_stream(*args, **kwargs): async def test_update_peerstore_from_identify_exception_handling(): """Test that _update_peerstore_from_identify handles exceptions gracefully.""" key_pair = create_new_key_pair() - host_a, host_b = host_pair_factory(key_pair) - # Create a mock peerstore that raises exceptions - class MockPeerstore: - def add_protocols(self, peer_id, protocols): - raise ValueError("Mock protocol error") - - def add_pubkey(self, peer_id, pubkey): - raise KeyError("Mock pubkey error") - - def add_addr(self, peer_id, addr, ttl): - raise RuntimeError("Mock addr error") + async with host_pair_factory() as (host_a, host_b): + # Create a mock peerstore that raises exceptions + class MockPeerstore: + def add_protocols(self, peer_id, protocols): + raise ValueError("Mock protocol error") + + def add_pubkey(self, peer_id, pubkey): + raise KeyError("Mock pubkey error") + + def add_addr(self, peer_id, addr, ttl): + raise RuntimeError("Mock addr error") + + def consume_peer_record(self, envelope, ttl): + raise ConnectionError("Mock record error") + + mock_peerstore = MockPeerstore() + + # Create an identify message with various fields + identify_msg = Identify() + 
identify_msg.public_key = b"mock_public_key" + identify_msg.listen_addrs.extend([b"/ip4/127.0.0.1/tcp/4001"]) + identify_msg.protocols.extend(["/test/protocol/1.0.0"]) + identify_msg.observed_addr = b"/ip4/127.0.0.1/tcp/4002" + identify_msg.signedPeerRecord = b"mock_signed_record" + + # Test that the function handles exceptions gracefully + # It should not raise any exceptions, just log errors + await _update_peerstore_from_identify(mock_peerstore, host_b.get_id(), identify_msg) - def consume_peer_record(self, envelope, ttl): - raise ConnectionError("Mock record error") - - mock_peerstore = MockPeerstore() - - # Create an identify message with various fields - identify_msg = Identify() - identify_msg.public_key = b"mock_public_key" - identify_msg.listen_addrs.extend([b"/ip4/127.0.0.1/tcp/4001"]) - identify_msg.protocols.extend(["/test/protocol/1.0.0"]) - identify_msg.observed_addr = b"/ip4/127.0.0.1/tcp/4002" - identify_msg.signedPeerRecord = b"mock_signed_record" - - # Test that the function handles exceptions gracefully - # It should not raise any exceptions, just log errors - await _update_peerstore_from_identify(mock_peerstore, host_b.get_id(), identify_msg) - - # If we get here without exceptions, the test passes + # If we get here without exceptions, the test passes @pytest.mark.trio async def test_push_identify_to_peers_exception_handling(): """Test that push_identify_to_peers handles exceptions gracefully.""" key_pair = create_new_key_pair() - host_a, host_b = host_pair_factory(key_pair) - # Start both hosts - async with run_host_forever(host_a), run_host_forever(host_b): - await wait_until_listening(host_a) - await wait_until_listening(host_b) - + async with host_pair_factory() as (host_a, host_b): # Connect the hosts - peer_info = host_b.get_addrs()[0] + peer_info = PeerInfo(host_b.get_id(), host_b.get_addrs()) await host_a.connect(peer_info) # Mock new_stream to raise an exception for one peer diff --git a/tests/core/stream_muxer/test_mplex_stream.py b/tests/core/stream_muxer/test_mplex_stream.py index b556a4f6a..75ce3cea4 100644 --- a/tests/core/stream_muxer/test_mplex_stream.py +++ b/tests/core/stream_muxer/test_mplex_stream.py @@ -291,23 +291,12 @@ async def test_mplex_stream_set_deadline_exception_handling(monkeypatch, mplex_s """Test deadline setting handles exceptions properly.""" stream_0, _ = mplex_stream_pair - # Mock the deadline setting to raise an exception + # Test with negative deadline value to trigger validation error original_read_deadline = stream_0.read_deadline original_write_deadline = stream_0.write_deadline - def mock_set_deadline(ttl): - raise RuntimeError("Mock deadline error") - - # Patch the attribute assignment to raise an exception - def mock_setattr(obj, name, value): - if name in ('read_deadline', 'write_deadline'): - raise RuntimeError("Mock deadline error") - return setattr(obj, name, value) - - monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) - with pytest.raises(MuxedStreamError, match="Failed to set deadline"): - stream_0.set_deadline(30) + stream_0.set_deadline(-1) # Verify original values are preserved assert stream_0.read_deadline == original_read_deadline @@ -321,16 +310,9 @@ async def test_mplex_stream_set_read_deadline_exception_handling(monkeypatch, mp original_read_deadline = stream_0.read_deadline - # Patch the attribute assignment to raise an exception - def mock_setattr(obj, name, value): - if name == 'read_deadline': - raise ValueError("Mock read deadline error") - return setattr(obj, name, value) - - 
monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) - + # Test with negative deadline value to trigger validation error with pytest.raises(MuxedStreamError, match="Failed to set read deadline"): - stream_0.set_read_deadline(15) + stream_0.set_read_deadline(-1) # Verify original value is preserved assert stream_0.read_deadline == original_read_deadline @@ -343,16 +325,9 @@ async def test_mplex_stream_set_write_deadline_exception_handling(monkeypatch, m original_write_deadline = stream_0.write_deadline - # Patch the attribute assignment to raise an exception - def mock_setattr(obj, name, value): - if name == 'write_deadline': - raise KeyError("Mock write deadline error") - return setattr(obj, name, value) - - monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) - + # Test with negative deadline value to trigger validation error with pytest.raises(MuxedStreamError, match="Failed to set write deadline"): - stream_0.set_write_deadline(20) + stream_0.set_write_deadline(-1) # Verify original value is preserved assert stream_0.write_deadline == original_write_deadline @@ -363,19 +338,11 @@ async def test_mplex_stream_deadline_preserves_exception_chain(monkeypatch, mple """Test that deadline methods preserve the original exception chain.""" stream_0, _ = mplex_stream_pair - original_error = RuntimeError("Original deadline error") - - # Patch the attribute assignment to raise an exception - def mock_setattr(obj, name, value): - if name in ('read_deadline', 'write_deadline'): - raise original_error - return setattr(obj, name, value) - - monkeypatch.setattr(stream_0, '__setattr__', mock_setattr) - + # Test with negative deadline value to trigger validation error with pytest.raises(MuxedStreamError) as exc_info: - stream_0.set_deadline(30) + stream_0.set_deadline(-1) # Check that the original exception is preserved in the chain - assert exc_info.value.__cause__ is original_error + assert isinstance(exc_info.value.__cause__, ValueError) + assert "Deadline cannot be negative" in str(exc_info.value.__cause__) assert "Failed to set deadline" in str(exc_info.value)