From e346c092282fcfa0de3af8858528ddf8e6499942 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 14:46:23 -0500 Subject: [PATCH] bug fixes --- onionr/communicator2.py | 11 ++++++++--- onionr/onionrdaemontools.py | 2 +- onionr/onionrpeers.py | 6 +++++- onionr/onionrutils.py | 4 ++-- onionr/static-data/default_config.json | 2 +- 5 files changed, 17 insertions(+), 8 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index 057defed..42bbbce8 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -89,7 +89,7 @@ class OnionrCommunicatorDaemon: OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1) OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True) OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) - OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 650) + OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65) OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True) netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600) @@ -152,7 +152,7 @@ class OnionrCommunicatorDaemon: if not self.isOnline: break if self._core._utils.storageCounter.isFull(): - logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used') + logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used') break peer = self.pickOnlinePeer() # select random online peer # if we've already tried all the online peers this time around, stop @@ -187,6 +187,7 @@ class OnionrCommunicatorDaemon: def getBlocks(self): '''download new blocks in queue''' for blockHash in self.blockQueue: + removeFromQueue = True if self.shutdown or not self.isOnline: # Exit loop if shutting down or offline break @@ -198,6 +199,8 @@ class OnionrCommunicatorDaemon: logger.debug('%s is already saved' % (blockHash,)) 
self.blockQueue.remove(blockHash) continue + if self._core._utils.storageCounter.isFull(): + break self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block logger.info("Attempting to download %s..." % blockHash) peerUsed = self.pickOnlinePeer() @@ -225,6 +228,7 @@ class OnionrCommunicatorDaemon: self._core.setData(content) except onionrexceptions.DiskAllocationReached: logger.error("Reached disk allocation allowance, cannot save this block.") + removeFromQueue = False else: self._core.addToBlockDB(blockHash, dataSaved=True) self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database @@ -246,7 +250,8 @@ class OnionrCommunicatorDaemon: # Punish peer for sharing invalid block (not always malicious, but is bad regardless) onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50) logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash) - self.blockQueue.remove(blockHash) # remove from block queue both if success or false + if removeFromQueue: + self.blockQueue.remove(blockHash) # remove from block queue both if success or false self.currentDownloading.remove(blockHash) self.decrementThreadCount('getBlocks') return diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index ff4a9b3f..7509d421 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -58,7 +58,7 @@ class DaemonTools: def netCheck(self): '''Check if we are connected to the internet or not when we can't connect to any peers''' if len(self.daemon.onlinePeers) != 0: - if not self.daemon._core._utils.checkNetwork(): + if not self.daemon._core._utils.checkNetwork(torPort=self.daemon.proxyPort): logger.warn('Network check failed, are you connected to the internet?') self.daemon.isOnline = False self.daemon.decrementThreadCount('netCheck') diff --git a/onionr/onionrpeers.py b/onionr/onionrpeers.py index 710f698d..82224d0a 100644 --- 
a/onionr/onionrpeers.py +++ b/onionr/onionrpeers.py @@ -90,7 +90,11 @@ def peerCleanup(coreInst): if PeerProfiles(address, coreInst).score < minScore: coreInst.removeAddress(address) try: - coreInst._blacklist.addToDB(address, dataType=1, expire=300) + if (coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600: + expireTime = 600 + else: + expireTime = 86400 + coreInst._blacklist.addToDB(address, dataType=1, expire=expireTime) except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue pass logger.warn('Removed address ' + address + '.') diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index d1383cab..2a47aaba 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -631,7 +631,7 @@ class OnionrUtils: pass return data - def checkNetwork(self): + def checkNetwork(self, torPort=0): '''Check if we are connected to the internet (through Tor)''' retData = False connectURLs = [] @@ -640,7 +640,7 @@ class OnionrUtils: connectURLs = connectTest.read().split(',') for url in connectURLs: - if self.doGetRequest(url) != False: + if self.doGetRequest(url, port=torPort) != False: retData = True break except FileNotFoundError: diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 4b53f391..9fba656b 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -51,7 +51,7 @@ }, "allocations":{ - "disk": 800, + "disk": 10000000000, "netTotal": 1000000000, "blockCache": 5000000, "blockCacheTotal": 50000000