From dd5cb991552573efe758c46a88c22c538595e182 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Wed, 22 Aug 2018 23:59:41 -0500 Subject: [PATCH] * do not save blocks if disk allocation reached * improved some commenting * bug fixes --- onionr/blockimporter.py | 12 ++++++++---- onionr/communicator2.py | 26 ++++++++++++++++++-------- onionr/core.py | 23 ++++++++++++++--------- onionr/onionrexceptions.py | 5 +++++ onionr/onionrutils.py | 5 ++--- 5 files changed, 47 insertions(+), 24 deletions(-) diff --git a/onionr/blockimporter.py b/onionr/blockimporter.py index 2c29927f..ce1cd1fe 100644 --- a/onionr/blockimporter.py +++ b/onionr/blockimporter.py @@ -39,8 +39,12 @@ def importBlockFromData(content, coreInst): if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid if coreInst._crypto.verifyPow(content): # check if POW is enough/correct logger.info('Block passed proof, saving.') - blockHash = coreInst.setData(content) - coreInst.addToBlockDB(blockHash, dataSaved=True) - coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database - retData = True + try: + blockHash = coreInst.setData(content) + except onionrexceptions.DiskAllocationReached: + pass + else: + coreInst.addToBlockDB(blockHash, dataSaved=True) + coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database + retData = True return retData \ No newline at end of file diff --git a/onionr/communicator2.py b/onionr/communicator2.py index a575a350..da610f4b 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -117,14 +117,14 @@ class OnionrCommunicatorDaemon: pass logger.info('Goodbye.') - self._core._utils.localCommand('shutdown') + self._core._utils.localCommand('shutdown') # shutdown the api time.sleep(0.5) def lookupKeys(self): '''Lookup new keys''' logger.debug('Looking up new keys...') tryAmount = 1 - for i in range(tryAmount): + for i in range(tryAmount): # amount of times to ask peers for 
new keys # Download new key list from random online peers peer = self.pickOnlinePeer() newKeys = self.peerAction(peer, action='kex') @@ -151,6 +151,10 @@ class OnionrCommunicatorDaemon: existingBlocks = self._core.getBlockList() triedPeers = [] # list of peers we've tried this time around for i in range(tryAmount): + # check if disk allocation is used + if self._core._utils.storageCounter.isFull(): + logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used') + break peer = self.pickOnlinePeer() # select random online peer # if we've already tried all the online peers this time around, stop if peer in triedPeers: @@ -165,7 +169,7 @@ class OnionrCommunicatorDaemon: if newDBHash != self._core.getAddressInfo(peer, 'DBHash'): self._core.setAddressInfo(peer, 'DBHash', newDBHash) try: - newBlocks = self.peerAction(peer, 'getBlockHashes') + newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes except Exception as error: logger.warn("could not get new blocks with " + peer, error=error) newBlocks = False @@ -177,7 +181,7 @@ class OnionrCommunicatorDaemon: if not i in existingBlocks: # if block does not exist on disk and is not already in block queue if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i): - self.blockQueue.append(i) + self.blockQueue.append(i) # add blocks to download queue self.decrementThreadCount('lookupBlocks') return @@ -185,7 +189,9 @@ class OnionrCommunicatorDaemon: '''download new blocks in queue''' for blockHash in self.blockQueue: if self.shutdown: + # Exit loop if shutting down break + # Do not download blocks being downloaded or that are already saved (edge cases) if blockHash in self.currentDownloading: logger.debug('ALREADY DOWNLOADING ' + blockHash) continue @@ -193,7 +199,7 @@ class OnionrCommunicatorDaemon: logger.debug('%s is already saved' % (blockHash,)) self.blockQueue.remove(blockHash) continue - self.currentDownloading.append(blockHash) + 
self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block logger.info("Attempting to download %s..." % blockHash) peerUsed = self.pickOnlinePeer() content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata) @@ -216,9 +222,13 @@ class OnionrCommunicatorDaemon: if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce if self._core._crypto.verifyPow(content): # check if POW is enough/correct logger.info('Block passed proof, saving.') - self._core.setData(content) - self._core.addToBlockDB(blockHash, dataSaved=True) - self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database + try: + self._core.setData(content) + except onionrexceptions.DiskAllocationReached: + logger.error("Reached disk allocation allowance, cannot save additional blocks.") + else: + self._core.addToBlockDB(blockHash, dataSaved=True) + self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database else: logger.warn('POW failed for block ' + blockHash) else: diff --git a/onionr/core.py b/onionr/core.py index 9d97e3f1..14f4c3ba 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -50,6 +50,7 @@ class Core: self.dbCreate = dbcreator.DBCreator(self) self.usageFile = 'data/disk-usage.txt' + self.config = config if not os.path.exists('data/'): os.mkdir('data/') @@ -256,6 +257,8 @@ class Core: Set the data assciated with a hash ''' data = data + dataSize = sys.getsizeof(data) + if not type(data) is bytes: data = data.encode() @@ -268,15 +271,17 @@ class Core: pass # TODO: properly check if block is already saved elsewhere #raise Exception("Data is already set for " + dataHash) else: - blockFile = open(blockFileName, 'wb') - blockFile.write(data) - blockFile.close() - - conn = sqlite3.connect(self.blockDB) - c = conn.cursor() - c.execute("UPDATE hashes SET dataSaved=1 WHERE 
hash = '" + dataHash + "';") - conn.commit() - conn.close() + if self._utils.storageCounter.addBytes(dataSize) != False: + blockFile = open(blockFileName, 'wb') + blockFile.write(data) + blockFile.close() + conn = sqlite3.connect(self.blockDB) + c = conn.cursor() + c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';") + conn.commit() + conn.close() + else: + raise onionrexceptions.DiskAllocationReached return dataHash diff --git a/onionr/onionrexceptions.py b/onionr/onionrexceptions.py index b26a97d7..040bc9be 100644 --- a/onionr/onionrexceptions.py +++ b/onionr/onionrexceptions.py @@ -58,3 +58,8 @@ class MissingPort(Exception): class InvalidAddress(Exception): pass + +# file exceptions + +class DiskAllocationReached(Exception): + pass \ No newline at end of file diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index 6fffbee8..bf57df66 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -23,7 +23,7 @@ import nacl.signing, nacl.encoding from onionrblockapi import Block import onionrexceptions from defusedxml import minidom -import pgpwords +import pgpwords, storagecounter if sys.version_info < (3, 6): try: import sha3 @@ -40,9 +40,9 @@ class OnionrUtils: self._core = coreInstance self.timingToken = '' - self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions self.peerProcessing = {} # dict of current peer actions: peer, actionList + self.storageCounter = storagecounter.StorageCounter(self._core) config.reload() return @@ -647,7 +647,6 @@ class OnionrUtils: if self.doGetRequest(url) != False: retData = True break - except FileNotFoundError: pass return retData