Don't spam tracebacks when sockets time out

This commit is contained in:
Kevin F 2022-05-20 10:13:12 -05:00
parent 85626d6642
commit 158178d6fc
3 changed files with 19 additions and 4 deletions

View File

@ -55,10 +55,14 @@ async def _setup_edge(
s.settimeout(10)
if s.recv(1) == dandelion.StemAcceptResult.DENY:
raise StemConnectionDenied
except TimeoutError:
logger.debug("Peer timed out when establishing stem connection", terminal=True)
logger.debug(traceback.format_exc())
except StemConnectionDenied:
logger.debug(
"Stem connection denied (peer has too many) " +
f"{peer.transport_address}")
logger.debug(traceback.format_exc())
except Exception:
logger.warn(
"Error asking peer to establish stem connection" +

View File

@ -1,10 +1,12 @@
from threading import Thread
from time import sleep
from traceback import format_exc
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from socket import socket
from onionrplugins import onionrevents
import logger
from ..peer import Peer
from ..commands import GossipCommands, command_to_byte
@ -15,6 +17,14 @@ from ..peerset import gossip_peer_set
MAX_PEERS = 10
def _do_ask_peer(peer):
    """Ask *peer* for more peers, never letting an exception escape the thread.

    Timeouts are routine on high-latency transports, so they are logged
    quietly at debug level; any other failure is surfaced with a full
    traceback on the terminal.
    """
    try:
        _ask_peer(peer)
    except Exception as exc:
        if isinstance(exc, TimeoutError):
            # Expected condition -- no traceback spam.
            logger.debug("Timed out when asking for new peers")
        else:
            logger.error(format_exc(), terminal=True)
def _ask_peer(peer):
s: 'socket' = peer.get_socket(12)
s.sendall(command_to_byte(GossipCommands.PEER_EXCHANGE))
@ -60,7 +70,7 @@ def get_new_peers():
# Start threads to ask the peers for more peers
threads = []
for peer in peers_we_ask:
t = Thread(target=_ask_peer, args=[peer], daemon=True)
t = Thread(target=_do_ask_peer, args=[peer], daemon=True)
t.start()
threads.append(t)
peers_we_ask.clear()

View File

@ -12,7 +12,8 @@ def connect_peer(peer):
except Exception:
logger.warn(f"Could not connect to {peer.transport_address}")
else:
s.sendall(command_to_byte(GossipCommands.CLOSE))
s.sendall(command_to_byte(GossipCommands.PING))
if s.recv(5).decode('utf-8') == 'PONG':
gossip_peer_set.add(peer)
logger.info(f"connected to {peer.transport_address}")
s.close()
gossip_peer_set.add(peer)
logger.info(f"connected to {peer.transport_address}")