cleanup lookupblocks

Kevin 2020-06-30 17:08:32 -05:00
parent e174b4a636
commit 63e676ed6f


@@ -1,4 +1,4 @@
-"""Onionr - Private P2P Communication
+"""Onionr - Private P2P Communication.
 
 Lookup new blocks with the communicator using a random connected peer
 """
@@ -8,7 +8,7 @@ import logger
 import onionrproofs
 from onionrutils import stringvalidators, epoch
 from communicator import peeraction, onlinepeers
-from coredb import blockmetadb
+from coredb.blockmetadb import get_block_list
 from utils import reconstructhash
 from onionrblocks import onionrblacklist
 import onionrexceptions
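
Note: the only change in this hunk swaps a module import for a direct function import, so the call site later in the diff can shorten blockmetadb.get_block_list() to get_block_list(). A minimal illustration of the equivalence (only runnable inside the Onionr source tree):

# Both names bind the same function object; the commit just flattens the path.
from coredb import blockmetadb
from coredb.blockmetadb import get_block_list

assert get_block_list is blockmetadb.get_block_list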
@@ -36,20 +36,24 @@ def lookup_blocks_from_communicator(comm_inst):
     logger.info('Looking up new blocks')
     tryAmount = 2
     newBlocks = ''
-    existingBlocks = blockmetadb.get_block_list() # List of existing saved blocks
-    triedPeers = [] # list of peers we've tried this time around
-    maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
-    lastLookupTime = 0 # Last time we looked up a particular peer's list
+    # List of existing saved blocks
+    existingBlocks = get_block_list()
+    triedPeers = []  # list of peers we've tried this time around
+    # Max amount of *new* block hashes to have in queue
+    maxBacklog = 1560
+    lastLookupTime = 0  # Last time we looked up a particular peer's list
     new_block_count = 0
     for i in range(tryAmount):
-        listLookupCommand = 'getblocklist' # This is defined here to reset it each time
+        # Defined here to reset it each time, time offset is added later
+        listLookupCommand = 'getblocklist'
         if len(comm_inst.blockQueue) >= maxBacklog:
             break
         if not comm_inst.isOnline:
             break
         # check if disk allocation is used
         if comm_inst.storage_counter.is_full():
-            logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
+            logger.debug(
+                'Not looking up new blocks due to maximum amount of disk used')
             break
         try:
             # select random online peer
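
The maxBacklog check in this hunk is backpressure: once comm_inst.blockQueue already holds 1560 not-yet-downloaded hashes, the lookup loop stops asking peers for more, so a chatty or malicious peer cannot balloon memory. A self-contained sketch of the same pattern, with hypothetical names (enqueue_hashes is not part of Onionr):

MAX_BACKLOG = 1560  # same cap the commit uses for comm_inst.blockQueue

def enqueue_hashes(block_queue: dict, advertised: list) -> int:
    """Queue advertised block hashes until the backlog cap is reached.

    block_queue maps hash -> list of peers that advertised it, mirroring
    the role of comm_inst.blockQueue in the diff above.
    """
    added = 0
    for block_hash in advertised:
        if len(block_queue) >= MAX_BACKLOG:
            break  # backpressure: leave the rest for a later lookup pass
        if block_hash not in block_queue:
            block_queue[block_hash] = []
            added += 1
    return added

queue = {}
enqueue_hashes(queue, ['hash%d' % n for n in range(2000)])
assert len(queue) == MAX_BACKLOG  # the extra 440 hashes were dropped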
@@ -65,32 +69,41 @@ def lookup_blocks_from_communicator(comm_inst):
                 continue
         triedPeers.append(peer)
 
-        # Get the last time we looked up a peer's stamp to only fetch blocks since then.
+        # Get the last time we looked up a peer's stamp,
+        # to only fetch blocks since then.
         # Saved in memory only for privacy reasons
         try:
             lastLookupTime = comm_inst.dbTimestamps[peer]
         except KeyError:
-            lastLookupTime = epoch.get_epoch() - config.get("general.max_block_age", onionrvalues.DEFAULT_EXPIRE)
+            lastLookupTime = epoch.get_epoch() - \
+                config.get("general.max_block_age",
+                           onionrvalues.DEFAULT_EXPIRE)
         listLookupCommand += '?date=%s' % (lastLookupTime,)
         try:
-            newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand) # get list of new block hashes
+            newBlocks = peeraction.peer_action(
+                comm_inst,
+                peer, listLookupCommand)  # get list of new block hashes
         except Exception as error:
-            logger.warn('Could not get new blocks from %s.' % peer, error = error)
+            logger.warn(
+                f'Could not get new blocks from {peer}.',
+                error=error)
             newBlocks = False
-        else:
-            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
-        if newBlocks != False:
+
+        if newBlocks != False:  # noqa
             # if request was a success
             for i in newBlocks.split('\n'):
                 if stringvalidators.validate_hash(i):
                     i = reconstructhash.reconstruct_hash(i)
                     # if newline seperated string is valid hash
-                    if not i in existingBlocks:
-                        # if block does not exist on disk and is not already in block queue
+                    # if block does not exist on disk + is not already in queue
+                    if i not in existingBlocks:
                         if i not in comm_inst.blockQueue:
-                            if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
+                            if onionrproofs.hashMeetsDifficulty(i) and \
+                                    not blacklist.inBlacklist(i):
                                 if len(comm_inst.blockQueue) <= 1000000:
-                                    comm_inst.blockQueue[i] = [peer] # add blocks to download queue
+                                    # add blocks to download queue
+                                    comm_inst.blockQueue[i] = [peer]
                                     new_block_count += 1
                                 else:
                                     if peer not in comm_inst.blockQueue[i]:
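
Two things are worth noting in this hunk. First, the ?date= suffix turns getblocklist into a delta sync: a peer only returns hashes newer than our last successful lookup of it, and the per-peer stamps live only in comm_inst.dbTimestamps in memory, deliberately never on disk. Second, a hash is queued only if it validates, is not already stored or queued, meets the proof-of-work difficulty, and is not blacklisted. A rough standalone sketch of the date-offset construction (build_lookup_command and the 31-day figure are assumptions, not Onionr's API):

import time

MAX_BLOCK_AGE = 2678400  # assumed default block expiry (~31 days)

def build_lookup_command(peer: str, db_timestamps: dict) -> str:
    """Build a getblocklist command that only asks for new blocks.

    db_timestamps is an in-memory dict of peer -> epoch of our last
    successful lookup; it is intentionally never persisted.
    """
    command = 'getblocklist'  # reset per request, offset appended below
    try:
        last_lookup = db_timestamps[peer]
    except KeyError:
        # Never asked this peer before: only request blocks young
        # enough that peers should still be storing them.
        last_lookup = int(time.time()) - MAX_BLOCK_AGE
    return command + '?date=%s' % (last_lookup,)

stamps = {'peer-a': 1593554400}
print(build_lookup_command('peer-a', stamps))  # uses the stored stamp
print(build_lookup_command('peer-b', stamps))  # falls back to max age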
@@ -100,7 +113,9 @@ def lookup_blocks_from_communicator(comm_inst):
     block_string = ""
     if new_block_count > 1:
         block_string = "s"
-    logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
-    comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
+    logger.info(
+        f'Discovered {new_block_count} new block{block_string}',
+        terminal=True)
+    comm_inst.download_blocks_timer.count = \
+        int(comm_inst.download_blocks_timer.frequency * 0.99)
     comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
     return
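
The download_blocks_timer.count assignment at the end is a scheduling nudge. The communicator's looping timers fire when an internal tick counter reaches the timer's frequency, so setting count to 99% of frequency right after discovering blocks makes the download timer fire almost immediately instead of waiting out a full period. A toy model of that mechanism (not Onionr's actual timer class):

class LoopingTimer:
    """Toy polled timer: fn fires once count catches up to frequency."""

    def __init__(self, frequency, fn):
        self.frequency = frequency
        self.count = 0
        self.fn = fn

    def tick(self):
        """Called once per communicator loop iteration."""
        self.count += 1
        if self.count >= self.frequency:
            self.count = 0
            self.fn()

timer = LoopingTimer(60, lambda: print('downloading queued blocks'))

# Normally 60 ticks pass between runs; the nudge skips 59 of them.
timer.count = int(timer.frequency * 0.99)  # 59
timer.tick()  # fires on the very next tick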