Compare commits


No commits in common. "911d8118bc9fbf080b6e852524472d320cbd718c" and "421c6da25a0635e733a2f635c76d41235d3fc7b3" have entirely different histories.

11 changed files with 16 additions and 44 deletions

View File

@@ -65,7 +65,6 @@ import onionrvalues # noqa
import onionrexceptions # noqa
import onionrsetup as setup # noqa
setup.setup_config()
setup.setup_default_plugins()
@@ -85,7 +84,7 @@ import bigbrother # noqa
from onionrcommands import parser # noqa
from onionrplugins import onionrevents as events # noqa
setup.setup_config()
import config # noqa
from utils import identifyhome # noqa

View File

@@ -23,7 +23,6 @@ You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
untrusted_exec = True
def block_system(cmd):
    """Prevent os.system except for whitelisted commands+contexts."""
@@ -37,8 +36,6 @@ def block_exec(event, info):
    # because libraries have stupid amounts of compile/exec/eval,
    # We have to use a whitelist where it can be tolerated
    # Generally better than nothing, not a silver bullet
    if untrusted_exec:
        return
    whitelisted_code = [
        'netrc.py',
        'shlex.py',

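The whitelist check in this plugin comes down to looking at where a compile/exec/eval call originated and letting it through only for known stdlib files. A minimal standalone sketch of that idea, assuming a filename-suffix match; the hook signature and whitelist contents below are illustrative, not the plugin's actual code:

    import traceback

    # Illustrative whitelist mirroring the entries visible in the diff
    WHITELISTED_CODE = ('netrc.py', 'shlex.py')

    def block_exec(event, info):
        """Raise unless the exec/eval call came from a whitelisted file."""
        # Allow the call if any frame on the stack comes from a whitelisted
        # stdlib file; otherwise treat it as untrusted and refuse it.
        for frame in traceback.extract_stack():
            if frame.filename.endswith(WHITELISTED_CODE):
                return
        raise PermissionError(f"Blocked {event}: {info}")
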
View File

@@ -3,7 +3,6 @@
Dandelion ++ Gossip client logic
"""
import traceback
from threading import Thread
from typing import TYPE_CHECKING
from typing import Set, Tuple
from time import sleep
@@ -86,11 +85,7 @@ def start_gossip_client():
    Stream new blocks
    """
    bl: Block
    def _start_announce():
        sleep(60)
        do_announce()
    Thread(target=_start_announce, daemon=True).start()
    # Start a thread that runs every 1200 secs to
    # Ask peers for a subset for their peer set

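The announce logic above is just a one-shot daemon thread that waits before firing. A self-contained sketch of the same pattern (do_announce here is a stand-in for the real announce call):

    from threading import Thread
    from time import sleep

    def do_announce():
        print("announcing to peers")  # stand-in for the real announce logic

    def start_delayed_announce(delay: int = 60):
        """Run do_announce once after `delay` seconds without blocking the caller."""
        def _start_announce():
            sleep(delay)
            do_announce()
        # daemon=True so the pending timer never keeps the process alive at shutdown
        Thread(target=_start_announce, daemon=True).start()
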
View File

@@ -1,4 +1,4 @@
from queue import Empty, Queue
from queue import Queue
from time import sleep
from secrets import choice
import traceback
@@ -112,9 +112,7 @@ async def stem_out(d_phase: 'DandelionPhase'):
                "Did not stem out any blocks in time, " +
                "if this happens regularly you may be under attack",
                terminal=True)
            for s in peer_sockets:
                if s:
                    s.close()
            list(map(lambda p: p.close(), peer_sockets))
            peer_sockets.clear()
            break
    # If above loop ran out of time or NotEnoughEdges, loops below will not execute
@@ -126,8 +124,6 @@ async def stem_out(d_phase: 'DandelionPhase'):
    for routine in stream_routines:
        try:
            await routine
        except Empty:
            pass
        except Exception:
            logger.warn(traceback.format_exc())
        else:

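Both variants above tear down every connection before clearing peer_sockets: the explicit loop tolerates placeholder None entries, while the map one-liner assumes every element is a live socket. A small illustration of that difference, using plain sockets:

    import socket

    # a mix of live sockets and a placeholder, as the loop variant anticipates
    peer_sockets = [socket.socket(), None, socket.socket()]

    # loop form: skips falsy entries before closing
    for s in peer_sockets:
        if s:
            s.close()

    # map form from the diff: list(map(lambda p: p.close(), peer_sockets))
    # terser, but raises AttributeError if any entry is None
    peer_sockets.clear()
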
View File

@@ -8,8 +8,6 @@ if TYPE_CHECKING:
from onionrplugins import onionrevents
import logger
from socks import GeneralProxyError
from ..peer import Peer
from ..commands import GossipCommands, command_to_byte
from ..constants import PEER_AMOUNT_TO_ASK, TRANSPORT_SIZE_BYTES
@@ -24,15 +22,11 @@ def _do_ask_peer(peer):
        _ask_peer(peer)
    except TimeoutError:
        logger.debug("Timed out when asking for new peers")
    except GeneralProxyError:
        logger.debug("Proxy error")
        logger.debug(format_exc(), terminal=True)
    except Exception:
        logger.error(format_exc(), terminal=True)
def _ask_peer(peer):
    s: 'socket' = peer.get_socket(12)
    s.sendall(command_to_byte(GossipCommands.PEER_EXCHANGE))
    # Get 10 max peers
    for _ in range(MAX_PEERS):
@@ -55,8 +49,7 @@ def _ask_peer(peer):
def get_new_peers():
    if not len(gossip_peer_set):
        logger.debug("Peer set empty, cannot get new peers")
        return
        raise ValueError("Peer set empty")
    # Deep copy the peer list
    peer_list: Peer = list(gossip_peer_set)

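The peer-exchange request shown here is a simple framed exchange: send one command, then read up to a fixed number of fixed-size transport addresses. A hedged sketch of that wire interaction; the constants and framing below are assumptions based only on the names in the diff:

    import socket

    MAX_PEERS = 10             # "Get 10 max peers" per the comment in the diff
    TRANSPORT_SIZE_BYTES = 64  # assumed fixed-size address field

    def ask_peer_for_peers(sock: socket.socket, peer_exchange_cmd: bytes) -> list:
        """Send a peer-exchange command and collect up to MAX_PEERS addresses."""
        sock.sendall(peer_exchange_cmd)
        peers = []
        for _ in range(MAX_PEERS):
            address = sock.recv(TRANSPORT_SIZE_BYTES)
            if not address:
                break  # peer closed the stream or has nothing more to send
            peers.append(address.strip())
        return peers
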
View File

@@ -1,4 +1,3 @@
import traceback
from gossip.commands import GossipCommands, command_to_byte
from .peerset import gossip_peer_set
@@ -9,14 +8,12 @@ def connect_peer(peer):
    if peer in gossip_peer_set:
        return
    try:
        s = peer.get_socket(120)
        s = peer.get_socket(15)
    except Exception:
        logger.warn(f"Could not connect to {peer.transport_address}")
        logger.warn(traceback.format_exc())
    else:
        with s:
            s.sendall(command_to_byte(GossipCommands.PING))
            if s.recv(4).decode('utf-8') == 'PONG':
            if s.recv(5).decode('utf-8') == 'PONG':
                gossip_peer_set.add(peer)
                logger.info(f"connected to {peer.transport_address}", terminal=True)
                logger.info(f"connected to {peer.transport_address}")
        s.close()

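The connect path above is a PING/PONG liveness check: open a socket, send the PING command, and only add the peer to the set if it answers PONG. Since recv(n) returns at most n bytes, reading 4 or 5 both yield 'PONG' when that is all the server sends. A sketch of the client side under those assumptions (the command byte and timeout are placeholders):

    import socket

    def ping_peer(sock: socket.socket, ping_cmd: bytes, timeout: float = 15.0) -> bool:
        """Return True if the peer answers the ping command with PONG."""
        sock.settimeout(timeout)
        with sock:
            sock.sendall(ping_cmd)
            reply = sock.recv(5).decode('utf-8', errors='replace')
            return reply.strip() == 'PONG'
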
View File

@@ -60,7 +60,6 @@ def gossip_server():
            match GossipCommands(cmd):
                case GossipCommands.PING:
                    writer.write(b'PONG')
                    break
                case GossipCommands.ANNOUNCE:
                    async def _read_announce():
                        address = await reader.readuntil(b'\n')

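On the server side, answering PING is just writing PONG back on the same stream. A minimal asyncio sketch under assumed framing (one command byte per request; the byte value and port are placeholders). The break in the diffed PING case ends the command loop after the first reply, whereas omitting it keeps reading further commands on the same connection:

    import asyncio

    PING = b'\x01'  # placeholder byte for GossipCommands.PING

    async def handle_peer(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        """Answer PING commands with PONG until the client disconnects."""
        while True:
            cmd = await reader.read(1)
            if not cmd:
                break
            if cmd == PING:
                writer.write(b'PONG')
                await writer.drain()
        writer.close()

    async def main():
        server = await asyncio.start_server(handle_peer, '127.0.0.1', 2021)
        async with server:
            await server.serve_forever()

    if __name__ == '__main__':
        asyncio.run(main())
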
View File

@@ -61,7 +61,7 @@ async def diffuse_blocks(reader: 'StreamReader', writer: 'StreamWriter'):
        _add_to_queue
    )
    async def _send_block(block: 'Block'):
    async def _send_block(bl: 'Block'):
        writer.write(block.id)
        await writer.drain()

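Note that the body line writer.write(block.id) only matches one of the two signatures directly; in the bl variant, block would have to resolve from the enclosing scope. A consistent, self-contained version of the helper (Block.id assumed to be bytes):

    from asyncio import StreamWriter

    async def send_block_id(writer: StreamWriter, bl) -> None:
        """Write a block's id to the peer and flush the transport buffer."""
        writer.write(bl.id)  # parameter name and body agree here
        await writer.drain()
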
View File

@@ -57,7 +57,7 @@ def show_stats():
        # file and folder size stats
        'div1': True, # this creates a solid line across the screen, a div
        'Total Block Size':
        sizeutils.human_size(sizeutils.size(home + 'blocks.db')),
        sizeutils.human_size(sizeutils.size(home + 'blocks/')),
        'Total Plugin Size':
        sizeutils.human_size(sizeutils.size(home + 'plugins/')),
        'Log File Size':

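sizeutils here is Onionr's own helper pair, and judging by the arguments in the diff its size() accepts both a file path and a directory. A rough, assumption-labeled equivalent of what such helpers typically do:

    import os

    def dir_size(path: str) -> int:
        """Total size in bytes of a file, or of every file under a directory."""
        if os.path.isfile(path):
            return os.path.getsize(path)
        total = 0
        for root, _dirs, files in os.walk(path):
            for name in files:
                total += os.path.getsize(os.path.join(root, name))
        return total

    def human_size(num_bytes: float) -> str:
        """Render a byte count with a binary-prefix unit."""
        for unit in ('B', 'KiB', 'MiB', 'GiB'):
            if num_bytes < 1024 or unit == 'GiB':
                return f"{num_bytes:.1f} {unit}"
            num_bytes /= 1024
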
View File

@@ -20,9 +20,6 @@ def on_announce_rec(api, data=None):
        return
    announced = announced.strip()
    if not announced.endswith('.onion'):
        announced += '.onion'
    logger.info(f"Peer {announced} announced to us.", terminal=True)
    data['callback'](TorPeer(socks_address, socks_port, announced))

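The announce handler normalizes whatever address the peer sent before wrapping it in a TorPeer; the normalization shown reduces to a couple of string operations. As a standalone helper:

    def normalize_onion(announced: str) -> str:
        """Strip whitespace and ensure the announced address ends in .onion."""
        announced = announced.strip()
        if not announced.endswith('.onion'):
            announced += '.onion'
        return announced

    # e.g. normalize_onion("example\n") == "example.onion"
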
View File

@@ -2,7 +2,6 @@ import shelve
from threading import Thread
from time import sleep
import os
import dbm
import traceback
from typing import Callable
@@ -40,14 +39,14 @@ def on_bootstrap(api, data):
    try:
        load_existing_peers(callback_func)
    except dbm.error:
    except FileNotFoundError:
        try:
            with open(bootstrap_file, 'r') as bootstrap_file_obj:
                bootstrap_nodes = set(bootstrap_file_obj.read().split(','))
        except FileNotFoundError:
            bootstrap_nodes = set()
        except Exception as e:
            logger.warn(traceback.format_exc(), terminal=True)
            logger.warn(traceback.format_exc())
            return
    else:
        return
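
The bootstrap handler tries a previously persisted peer store first and only falls back to the comma-separated bootstrap file when that store is missing. A compact sketch of that fallback chain; the shelve layout, key name, and paths below are assumptions, and catching both dbm.error and FileNotFoundError covers either variant shown in the diff:

    import dbm
    import shelve

    def load_bootstrap_peers(db_path: str, bootstrap_file: str) -> set:
        """Prefer previously stored peers; fall back to the bundled bootstrap list."""
        try:
            # 'r' raises if no existing peer database is found
            with shelve.open(db_path, 'r') as db:
                return set(db.get('peers', []))
        except (dbm.error, FileNotFoundError):
            try:
                with open(bootstrap_file, 'r') as f:
                    return set(filter(None, f.read().split(',')))
            except FileNotFoundError:
                return set()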