From 53577a4c10e8ab2b3002aa324ce66f8da4b45b09 Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 21 Aug 2018 15:01:50 -0500 Subject: [PATCH 01/14] work on netcheck and configuration --- onionr/communicator2.py | 3 +++ onionr/onionrdaemontools.py | 11 ++++++++++- onionr/onionrutils.py | 27 +++++++++++++++++++++++--- onionr/static-data/default_config.json | 2 +- 4 files changed, 38 insertions(+), 5 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index 38ba2692..a575a350 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -27,6 +27,8 @@ from defusedxml import minidom class OnionrCommunicatorDaemon: def __init__(self, debug, developmentMode): + self.isOnline = True # Assume we're connected to the internet + # list of timer instances self.timers = [] @@ -93,6 +95,7 @@ class OnionrCommunicatorDaemon: OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True) + netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600) announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1) cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True) diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index 8410cb80..36264600 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -53,4 +53,13 @@ class DaemonTools: if self.daemon._core._utils.doPostRequest(url, data) == 'Success': retData = True self.daemon.decrementThreadCount('announceNode') - return retData \ No newline at end of file + return retData + + def netCheck(self): + '''Check if we are connected to the internet or not when we can't connect to any peers''' + if len(self.daemon.onlinePeers) != 0: + if not self.daemon._core._utils.checkNetwork(): + logger.warn('Network check failed, are you connected to the internet?') + self.daemon.isOnline = False + + self.daemon.decrementThreadCount('netCheck') \ No newline at end of file diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index 6d22992c..6fffbee8 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -131,8 +131,12 @@ class OnionrUtils: if not config.get('tor.v3onions') and len(adder) == 62: continue if self._core.addAddress(adder): - logger.info('Added %s to db.' % adder, timestamp = True) - retVal = True + # Check if we have the maxmium amount of allowed stored peers + if config.get('peers.maxStoredPeers') > len(self._core.listAdders): + logger.info('Added %s to db.' 
% adder, timestamp = True) + retVal = True + else: + logger.warn('Reached the maximum amount of peers in the net database as allowed by your config.') else: pass #logger.debug('%s is either our address or already in our DB' % adder) @@ -630,6 +634,23 @@ class OnionrUtils: except AttributeError: pass return data + + def checkNetwork(self): + '''Check if we are connected to the internet (through Tor)''' + retData = False + connectURLs = [] + try: + with open('static-data/connect-check.txt', 'r') as connectTest: + connectURLs = connectTest.read().split(',') + + for url in connectURLs: + if self.doGetRequest(url) != False: + retData = True + break + + except FileNotFoundError: + pass + return retData def size(path='.'): ''' @@ -655,4 +676,4 @@ def humanSize(num, suffix='B'): if abs(num) < 1024.0: return "%.1f %s%s" % (num, unit, suffix) num /= 1024.0 - return "%.1f %s%s" % (num, 'Yi', suffix) + return "%.1f %s%s" % (num, 'Yi', suffix) \ No newline at end of file diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 5458db4a..86d44499 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -58,7 +58,7 @@ }, "peers":{ "minimumScore": -100, - "maxStoredPeers": 500, + "maxStoredPeers": 5000, "maxConnect": 5 } } From dd5cb991552573efe758c46a88c22c538595e182 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Wed, 22 Aug 2018 23:59:41 -0500 Subject: [PATCH 02/14] * do not save blocks if disk allocation reached * improved some commenting * bug fixes --- onionr/blockimporter.py | 12 ++++++++---- onionr/communicator2.py | 26 ++++++++++++++++++-------- onionr/core.py | 23 ++++++++++++++--------- onionr/onionrexceptions.py | 5 +++++ onionr/onionrutils.py | 5 ++--- 5 files changed, 47 insertions(+), 24 deletions(-) diff --git a/onionr/blockimporter.py b/onionr/blockimporter.py index 2c29927f..ce1cd1fe 100644 --- a/onionr/blockimporter.py +++ b/onionr/blockimporter.py @@ -39,8 +39,12 @@ def importBlockFromData(content, coreInst): if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid if coreInst._crypto.verifyPow(content): # check if POW is enough/correct logger.info('Block passed proof, saving.') - blockHash = coreInst.setData(content) - coreInst.addToBlockDB(blockHash, dataSaved=True) - coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database - retData = True + try: + blockHash = coreInst.setData(content) + except onionrexceptions.DiskAllocationReached: + pass + else: + coreInst.addToBlockDB(blockHash, dataSaved=True) + coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database + retData = True return retData \ No newline at end of file diff --git a/onionr/communicator2.py b/onionr/communicator2.py index a575a350..da610f4b 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -117,14 +117,14 @@ class OnionrCommunicatorDaemon: pass logger.info('Goodbye.') - self._core._utils.localCommand('shutdown') + self._core._utils.localCommand('shutdown') # shutdown the api time.sleep(0.5) def lookupKeys(self): '''Lookup new keys''' logger.debug('Looking up new keys...') tryAmount = 1 - for i in range(tryAmount): + for i in range(tryAmount): # amount of times to ask peers for new keys # Download new key list from random online peers peer = self.pickOnlinePeer() newKeys = self.peerAction(peer, action='kex') @@ -151,6 +151,10 @@ class OnionrCommunicatorDaemon: existingBlocks = self._core.getBlockList() 
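# The loop below asks up to tryAmount random online peers for new block hashes:
# each pass picks an online peer not yet tried this round, records its latest
# advertised DBHash, requests its block list with the 'getBlockHashes' action,
# and queues any hash that is neither stored locally, already queued, nor
# blacklisted. With this patch the lookup also stops early once the configured
# disk allocation (allocations.disk) has been used up.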
triedPeers = [] # list of peers we've tried this time around for i in range(tryAmount): + # check if disk allocation is used + if self._core._utils.storageCounter.isFull(): + logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used') + break peer = self.pickOnlinePeer() # select random online peer # if we've already tried all the online peers this time around, stop if peer in triedPeers: @@ -165,7 +169,7 @@ class OnionrCommunicatorDaemon: if newDBHash != self._core.getAddressInfo(peer, 'DBHash'): self._core.setAddressInfo(peer, 'DBHash', newDBHash) try: - newBlocks = self.peerAction(peer, 'getBlockHashes') + newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes except Exception as error: logger.warn("could not get new blocks with " + peer, error=error) newBlocks = False @@ -177,7 +181,7 @@ class OnionrCommunicatorDaemon: if not i in existingBlocks: # if block does not exist on disk and is not already in block queue if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i): - self.blockQueue.append(i) + self.blockQueue.append(i) # add blocks to download queue self.decrementThreadCount('lookupBlocks') return @@ -185,7 +189,9 @@ class OnionrCommunicatorDaemon: '''download new blocks in queue''' for blockHash in self.blockQueue: if self.shutdown: + # Exit loop if shutting down break + # Do not download blocks being downloaded or that are already saved (edge cases) if blockHash in self.currentDownloading: logger.debug('ALREADY DOWNLOADING ' + blockHash) continue @@ -193,7 +199,7 @@ class OnionrCommunicatorDaemon: logger.debug('%s is already saved' % (blockHash,)) self.blockQueue.remove(blockHash) continue - self.currentDownloading.append(blockHash) + self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block logger.info("Attempting to download %s..." 
% blockHash) peerUsed = self.pickOnlinePeer() content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata) @@ -216,9 +222,13 @@ class OnionrCommunicatorDaemon: if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce if self._core._crypto.verifyPow(content): # check if POW is enough/correct logger.info('Block passed proof, saving.') - self._core.setData(content) - self._core.addToBlockDB(blockHash, dataSaved=True) - self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database + try: + self._core.setData(content) + except onionrexceptions.DiskAllocationReached: + logger.error("Reached disk allocation allowance, cannot save additional blocks.") + else: + self._core.addToBlockDB(blockHash, dataSaved=True) + self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database else: logger.warn('POW failed for block ' + blockHash) else: diff --git a/onionr/core.py b/onionr/core.py index 9d97e3f1..14f4c3ba 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -50,6 +50,7 @@ class Core: self.dbCreate = dbcreator.DBCreator(self) self.usageFile = 'data/disk-usage.txt' + self.config = config if not os.path.exists('data/'): os.mkdir('data/') @@ -256,6 +257,8 @@ class Core: Set the data assciated with a hash ''' data = data + dataSize = sys.getsizeof(data) + if not type(data) is bytes: data = data.encode() @@ -268,15 +271,17 @@ class Core: pass # TODO: properly check if block is already saved elsewhere #raise Exception("Data is already set for " + dataHash) else: - blockFile = open(blockFileName, 'wb') - blockFile.write(data) - blockFile.close() - - conn = sqlite3.connect(self.blockDB) - c = conn.cursor() - c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';") - conn.commit() - conn.close() + if self._utils.storageCounter.addBytes(dataSize) != False: + blockFile = open(blockFileName, 'wb') + blockFile.write(data) + blockFile.close() + conn = sqlite3.connect(self.blockDB) + c = conn.cursor() + c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';") + conn.commit() + conn.close() + else: + raise onionrexceptions.DiskAllocationReached return dataHash diff --git a/onionr/onionrexceptions.py b/onionr/onionrexceptions.py index b26a97d7..040bc9be 100644 --- a/onionr/onionrexceptions.py +++ b/onionr/onionrexceptions.py @@ -58,3 +58,8 @@ class MissingPort(Exception): class InvalidAddress(Exception): pass + +# file exceptions + +class DiskAllocationReached: + pass \ No newline at end of file diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index 6fffbee8..bf57df66 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -23,7 +23,7 @@ import nacl.signing, nacl.encoding from onionrblockapi import Block import onionrexceptions from defusedxml import minidom -import pgpwords +import pgpwords, storagecounter if sys.version_info < (3, 6): try: import sha3 @@ -40,9 +40,9 @@ class OnionrUtils: self._core = coreInstance self.timingToken = '' - self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions self.peerProcessing = {} # dict of current peer actions: peer, actionList + self.storageCounter = storagecounter.StorageCounter(self._core) config.reload() return @@ -647,7 +647,6 @@ class OnionrUtils: if self.doGetRequest(url) != False: retData = True break - except FileNotFoundError: pass return retData From a6719abed78a221f6f263ea2b70d593603f1f109 Mon Sep 17 
00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 09:01:17 -0500 Subject: [PATCH 03/14] added storagecounter.py --- onionr/storagecounter.py | 61 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 onionr/storagecounter.py diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py new file mode 100644 index 00000000..50456a24 --- /dev/null +++ b/onionr/storagecounter.py @@ -0,0 +1,61 @@ +''' + Onionr - P2P Microblogging Platform & Social network. + + Keeps track of how much disk space we're using +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' +import config + +class StorageCounter: + def __init__(self, coreInst): + self._core = coreInst + self.dataFile = self._core.usageFile + return + + def isFull(self): + retData = False + if self._core.config.get('allocations.disk') >= self.getAmount(): + retData = True + return retData + + def _update(self, data): + with open(self.dataFile, 'w') as dataFile: + dataFile.write(str(data)) + def getAmount(self, data): + '''Return how much disk space we're using (according to record)''' + retData = 0 + try: + with open(self.dataFile, 'w') as dataFile: + retData = int(dataFile.read()) + except FileNotFoundError: + pass + return retData + + def addBytes(self, amount): + '''Record that we are now using more disk space, unless doing so would exceed configured max''' + newAmount = amount + self.getAmount() + retData = newAmount + if newAmount > self._core.config.get('allocations.disk'): + retData = False + else: + self._update(newAmount) + return retData + + def removeBytes(self, amount): + '''Record that we are now using less disk space''' + newAmount = self.getAmount() - amount + self._update(newAmount) + return newAmount \ No newline at end of file From 1217e4a83c36d80975e83535481b67b99b972415 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 09:45:51 -0500 Subject: [PATCH 04/14] removed bad argument in storagecounter --- onionr/storagecounter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index 50456a24..b2ccd0d1 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -34,7 +34,7 @@ class StorageCounter: def _update(self, data): with open(self.dataFile, 'w') as dataFile: dataFile.write(str(data)) - def getAmount(self, data): + def getAmount(self): '''Return how much disk space we're using (according to record)''' retData = 0 try: From cd39ae68b6700fc56d8e097c97a33766b3c48283 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 09:51:53 -0500 Subject: [PATCH 05/14] r not w --- onionr/storagecounter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index b2ccd0d1..024eb4f5 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -38,7 +38,7 @@ class StorageCounter: '''Return how much disk space we're using (according to record)''' retData = 0 
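# getAmount() reports the usage recorded in data/disk-usage.txt as an integer
# number of bytes; if the record file does not exist yet, the FileNotFoundError
# branch below simply leaves the count at zero.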
try: - with open(self.dataFile, 'w') as dataFile: + with open(self.dataFile, 'r') as dataFile: retData = int(dataFile.read()) except FileNotFoundError: pass From 25e4444bdafb16ef708952a6d278dad7b596284c Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 09:54:37 -0500 Subject: [PATCH 06/14] fix comparison error in storagecounter --- onionr/storagecounter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index 024eb4f5..481d7821 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -27,7 +27,7 @@ class StorageCounter: def isFull(self): retData = False - if self._core.config.get('allocations.disk') >= self.getAmount(): + if self._core.config.get('allocations.disk') <= self.getAmount(): retData = True return retData From 6d31fa4229b1b871b0cabe394e237431a61295ad Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 09:59:00 -0500 Subject: [PATCH 07/14] fix peer amount check not calling method --- onionr/onionrutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index bf57df66..b48160a3 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -132,7 +132,7 @@ class OnionrUtils: continue if self._core.addAddress(adder): # Check if we have the maxmium amount of allowed stored peers - if config.get('peers.maxStoredPeers') > len(self._core.listAdders): + if config.get('peers.maxStoredPeers') > len(self._core.listAdders()): logger.info('Added %s to db.' % adder, timestamp = True) retVal = True else: From e34c08b0368e14c545e7e5c9008fc1b1d3bcc141 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 12:48:49 -0500 Subject: [PATCH 08/14] sync improvements, bug fixes, config changes --- onionr/communicator2.py | 22 +++++++--------------- onionr/core.py | 14 ++++++++++++-- onionr/onionr.py | 10 ++++++++++ onionr/onionrdaemontools.py | 10 ++++++++-- onionr/onionrexceptions.py | 2 +- onionr/onionrutils.py | 4 ---- onionr/static-data/default_config.json | 2 +- onionr/storagecounter.py | 2 +- 8 files changed, 40 insertions(+), 26 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index da610f4b..057defed 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -80,10 +80,6 @@ class OnionrCommunicatorDaemon: if debug or developmentMode: OnionrCommunicatorTimers(self, self.heartbeat, 10) - # Print nice header thing :) - if config.get('general.display_header', True) and not self.shutdown: - self.header() - # Set timers, function reference, seconds # requiresPeer True means the timer function won't fire if we have no connected peers # TODO: make some of these timer counts configurable @@ -93,6 +89,7 @@ class OnionrCommunicatorDaemon: OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1) OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True) OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) + OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 650) OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True) netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600) @@ -152,6 +149,8 @@ class OnionrCommunicatorDaemon: triedPeers = [] # list of peers we've tried this time around for i in range(tryAmount): # check if disk allocation is used + if not self.isOnline: + break if 
self._core._utils.storageCounter.isFull(): logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used') break @@ -188,8 +187,8 @@ class OnionrCommunicatorDaemon: def getBlocks(self): '''download new blocks in queue''' for blockHash in self.blockQueue: - if self.shutdown: - # Exit loop if shutting down + if self.shutdown or not self.isOnline: + # Exit loop if shutting down or offline break # Do not download blocks being downloaded or that are already saved (edge cases) if blockHash in self.currentDownloading: @@ -221,11 +220,11 @@ class OnionrCommunicatorDaemon: #meta = metas[1] if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce if self._core._crypto.verifyPow(content): # check if POW is enough/correct - logger.info('Block passed proof, saving.') + logger.info('Block passed proof, attemping save.') try: self._core.setData(content) except onionrexceptions.DiskAllocationReached: - logger.error("Reached disk allocation allowance, cannot save additional blocks.") + logger.error("Reached disk allocation allowance, cannot save this block.") else: self._core.addToBlockDB(blockHash, dataSaved=True) self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database @@ -489,13 +488,6 @@ class OnionrCommunicatorDaemon: self.shutdown = True self.decrementThreadCount('detectAPICrash') - def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'): - if os.path.exists('static-data/header.txt'): - with open('static-data/header.txt', 'rb') as file: - # only to stdout, not file or log or anything - sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION)) - logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n') - class OnionrCommunicatorTimers: def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False): self.timerFunction = timerFunction diff --git a/onionr/core.py b/onionr/core.py index 14f4c3ba..13c89c9e 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -183,8 +183,16 @@ class Core: c.execute('Delete from hashes where hash=?;', t) conn.commit() conn.close() + blockFile = 'data/blocks/' + block + '.dat' + dataSize = 0 try: - os.remove('data/blocks/' + block + '.dat') + ''' Get size of data when loaded as an object/var, rather than on disk, + to avoid conflict with getsizeof when saving blocks + ''' + with open(blockFile, 'r') as data: + dataSize = sys.getsizeof(data.read()) + self._utils.storageCounter.removeBytes(dataSize) + os.remove(blockFile) except FileNotFoundError: pass @@ -280,6 +288,8 @@ class Core: c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';") conn.commit() conn.close() + with open(self.dataNonceFile, 'a') as nonceFile: + nonceFile.write(dataHash + '\n') else: raise onionrexceptions.DiskAllocationReached @@ -544,7 +554,7 @@ class Core: if unsaved: execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();' else: - execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;' + execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;' rows = list() for row in c.execute(execute): for i in row: 
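Taken together, the core.py changes in this series give setData() a storage-accounting contract: a block's size is charged against the configured allocations.disk budget through StorageCounter.addBytes(), and DiskAllocationReached is raised when that budget would be exceeded, while removeBlock() credits the space back with removeBytes(). The caller-side pattern that blockimporter.py and the communicator adopt looks roughly like the following minimal sketch; the trySaveBlock() wrapper itself is illustrative only, the calls inside it are the ones used in the diffs.

import onionrexceptions

def trySaveBlock(coreInst, content):
    '''Illustrative helper (not part of these patches): store a block body only
    if it fits in the configured disk allocation, then mark it as saved.'''
    try:
        # setData() writes the block file and charges its size against
        # allocations.disk via StorageCounter.addBytes(); when the budget
        # would be exceeded it raises DiskAllocationReached instead.
        blockHash = coreInst.setData(content)
    except onionrexceptions.DiskAllocationReached:
        # The caller can keep the hash queued for a retry; cleanOldBlocks()
        # may free space by removing (and blacklisting) the oldest blocks.
        return False
    coreInst.addToBlockDB(blockHash, dataSaved=True)
    coreInst._utils.processBlockMetadata(blockHash)  # cache metadata in the block DB
    return True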
diff --git a/onionr/onionr.py b/onionr/onionr.py index 1736c3f9..52b12e48 100755 --- a/onionr/onionr.py +++ b/onionr/onionr.py @@ -588,6 +588,9 @@ class Onionr: time.sleep(1) #TODO make runable on windows subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)]) + # Print nice header thing :) + if config.get('general.display_header', True): + self.header() logger.debug('Started communicator') events.event('daemon_start', onionr = self) try: @@ -759,5 +762,12 @@ class Onionr: print('Opening %s ...' % url) webbrowser.open(url, new = 1, autoraise = True) + def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'): + if os.path.exists('static-data/header.txt'): + with open('static-data/header.txt', 'rb') as file: + # only to stdout, not file or log or anything + sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', ONIONR_VERSION)) + logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n') + if __name__ == "__main__": Onionr() diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index 36264600..f269f028 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -61,5 +61,11 @@ class DaemonTools: if not self.daemon._core._utils.checkNetwork(): logger.warn('Network check failed, are you connected to the internet?') self.daemon.isOnline = False - - self.daemon.decrementThreadCount('netCheck') \ No newline at end of file + self.daemon.decrementThreadCount('netCheck') + + def cleanOldBlocks(self): + '''Delete old blocks if our disk allocation is full/near full''' + if self.daemon._core._utils.storageCounter.isFull(): + + + self.daemon.decrementThreadCount('cleanOldBlocks') \ No newline at end of file diff --git a/onionr/onionrexceptions.py b/onionr/onionrexceptions.py index 040bc9be..8044508f 100644 --- a/onionr/onionrexceptions.py +++ b/onionr/onionrexceptions.py @@ -61,5 +61,5 @@ class InvalidAddress(Exception): # file exceptions -class DiskAllocationReached: +class DiskAllocationReached(Exception): pass \ No newline at end of file diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index b48160a3..d1383cab 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -384,10 +384,6 @@ class OnionrUtils: pass else: retData = True - if retData: - # Executes if data not seen - with open(self._core.dataNonceFile, 'a') as nonceFile: - nonceFile.write(nonce + '\n') else: logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object') diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 86d44499..4b53f391 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -51,7 +51,7 @@ }, "allocations":{ - "disk": 9000000000, + "disk": 800, "netTotal": 1000000000, "blockCache": 5000000, "blockCacheTotal": 50000000 diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index 481d7821..a5c11f21 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -27,7 +27,7 @@ class StorageCounter: def isFull(self): retData = False - if self._core.config.get('allocations.disk') <= self.getAmount(): + if self._core.config.get('allocations.disk') <= (self.getAmount() + 
100): retData = True return retData From 638436ee10c535999d4334be4e55e5ff4c8b596a Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 13:02:48 -0500 Subject: [PATCH 09/14] sync improvements, bug fixes, config changes --- onionr/onionrdaemontools.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index f269f028..e36945c0 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -65,7 +65,9 @@ class DaemonTools: def cleanOldBlocks(self): '''Delete old blocks if our disk allocation is full/near full''' - if self.daemon._core._utils.storageCounter.isFull(): - - + while self.daemon._core._utils.storageCounter.isFull(): + oldest = self.daemon._core.getBlockList[0] + self.daemon._core._blacklist.addToDB(oldest) + self.daemon._core.removeBlock(oldest) + logger.info('Deleted block: %s' % (oldest,)) self.daemon.decrementThreadCount('cleanOldBlocks') \ No newline at end of file From 1faae80aafdc4907a158bc7cb78e763762a993eb Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 13:24:32 -0500 Subject: [PATCH 10/14] sync improvements, bug fixes, config changes --- onionr/core.py | 2 ++ onionr/onionrdaemontools.py | 2 +- onionr/storagecounter.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/onionr/core.py b/onionr/core.py index 13c89c9e..c6495f2f 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -175,6 +175,8 @@ class Core: def removeBlock(self, block): ''' remove a block from this node (does not automatically blacklist) + + **You may want blacklist.addToDB(blockHash) ''' if self._utils.validateHash(block): conn = sqlite3.connect(self.blockDB) diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index e36945c0..ff4a9b3f 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -66,7 +66,7 @@ class DaemonTools: def cleanOldBlocks(self): '''Delete old blocks if our disk allocation is full/near full''' while self.daemon._core._utils.storageCounter.isFull(): - oldest = self.daemon._core.getBlockList[0] + oldest = self.daemon._core.getBlockList()[0] self.daemon._core._blacklist.addToDB(oldest) self.daemon._core.removeBlock(oldest) logger.info('Deleted block: %s' % (oldest,)) diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index a5c11f21..b88bc4f5 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -27,7 +27,7 @@ class StorageCounter: def isFull(self): retData = False - if self._core.config.get('allocations.disk') <= (self.getAmount() + 100): + if self._core.config.get('allocations.disk') <= (self.getAmount() + 500): retData = True return retData From e346c092282fcfa0de3af8858528ddf8e6499942 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Thu, 23 Aug 2018 14:46:23 -0500 Subject: [PATCH 11/14] bug fixes --- onionr/communicator2.py | 11 ++++++++--- onionr/onionrdaemontools.py | 2 +- onionr/onionrpeers.py | 6 +++++- onionr/onionrutils.py | 4 ++-- onionr/static-data/default_config.json | 2 +- 5 files changed, 17 insertions(+), 8 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index 057defed..42bbbce8 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -89,7 +89,7 @@ class OnionrCommunicatorDaemon: OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1) OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True) OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) - OnionrCommunicatorTimers(self, 
self.daemonTools.cleanOldBlocks, 650) + OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65) OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True) netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600) @@ -152,7 +152,7 @@ class OnionrCommunicatorDaemon: if not self.isOnline: break if self._core._utils.storageCounter.isFull(): - logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used') + logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used') break peer = self.pickOnlinePeer() # select random online peer # if we've already tried all the online peers this time around, stop @@ -187,6 +187,7 @@ class OnionrCommunicatorDaemon: def getBlocks(self): '''download new blocks in queue''' for blockHash in self.blockQueue: + removeFromQueue = True if self.shutdown or not self.isOnline: # Exit loop if shutting down or offline break @@ -198,6 +199,8 @@ class OnionrCommunicatorDaemon: logger.debug('%s is already saved' % (blockHash,)) self.blockQueue.remove(blockHash) continue + if self._core._utils.storageCounter.isFull(): + break self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block logger.info("Attempting to download %s..." % blockHash) peerUsed = self.pickOnlinePeer() @@ -225,6 +228,7 @@ class OnionrCommunicatorDaemon: self._core.setData(content) except onionrexceptions.DiskAllocationReached: logger.error("Reached disk allocation allowance, cannot save this block.") + removeFromQueue = False else: self._core.addToBlockDB(blockHash, dataSaved=True) self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database @@ -246,7 +250,8 @@ class OnionrCommunicatorDaemon: # Punish peer for sharing invalid block (not always malicious, but is bad regardless) onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50) logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash) - self.blockQueue.remove(blockHash) # remove from block queue both if success or false + if removeFromQueue: + self.blockQueue.remove(blockHash) # remove from block queue both if success or false self.currentDownloading.remove(blockHash) self.decrementThreadCount('getBlocks') return diff --git a/onionr/onionrdaemontools.py b/onionr/onionrdaemontools.py index ff4a9b3f..7509d421 100644 --- a/onionr/onionrdaemontools.py +++ b/onionr/onionrdaemontools.py @@ -58,7 +58,7 @@ class DaemonTools: def netCheck(self): '''Check if we are connected to the internet or not when we can't connect to any peers''' if len(self.daemon.onlinePeers) != 0: - if not self.daemon._core._utils.checkNetwork(): + if not self.daemon._core._utils.checkNetwork(torPort=self.daemon.proxyPort): logger.warn('Network check failed, are you connected to the internet?') self.daemon.isOnline = False self.daemon.decrementThreadCount('netCheck') diff --git a/onionr/onionrpeers.py b/onionr/onionrpeers.py index 710f698d..82224d0a 100644 --- a/onionr/onionrpeers.py +++ b/onionr/onionrpeers.py @@ -90,7 +90,11 @@ def peerCleanup(coreInst): if PeerProfiles(address, coreInst).score < minScore: coreInst.removeAddress(address) try: - coreInst._blacklist.addToDB(address, dataType=1, expire=300) + if (self.coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600: + expireTime = 600 + else: + expireTime = 86400 + coreInst._blacklist.addToDB(address, dataType=1, 
expire=expireTime) except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue pass logger.warn('Removed address ' + address + '.') diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index d1383cab..2a47aaba 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -631,7 +631,7 @@ class OnionrUtils: pass return data - def checkNetwork(self): + def checkNetwork(self, torPort=0): '''Check if we are connected to the internet (through Tor)''' retData = False connectURLs = [] @@ -640,7 +640,7 @@ class OnionrUtils: connectURLs = connectTest.read().split(',') for url in connectURLs: - if self.doGetRequest(url) != False: + if self.doGetRequest(url, port=torPort) != False: retData = True break except FileNotFoundError: diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 4b53f391..9fba656b 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -51,7 +51,7 @@ }, "allocations":{ - "disk": 800, + "disk": 10000000000, "netTotal": 1000000000, "blockCache": 5000000, "blockCacheTotal": 50000000 From b46bd42d9e6aa21711b9c17267adf927c0552e1a Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Fri, 24 Aug 2018 17:42:09 -0500 Subject: [PATCH 12/14] fixed invalid argument call --- onionr/communicator2.py | 4 +++- onionr/core.py | 2 ++ onionr/storagecounter.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index 42bbbce8..df428c03 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -199,6 +199,8 @@ class OnionrCommunicatorDaemon: logger.debug('%s is already saved' % (blockHash,)) self.blockQueue.remove(blockHash) continue + if self._core._blacklist.inBlacklist(blockHash): + continue if self._core._utils.storageCounter.isFull(): break self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block @@ -223,7 +225,7 @@ class OnionrCommunicatorDaemon: #meta = metas[1] if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce if self._core._crypto.verifyPow(content): # check if POW is enough/correct - logger.info('Block passed proof, attemping save.') + logger.info('Block passed proof, attempting save.') try: self._core.setData(content) except onionrexceptions.DiskAllocationReached: diff --git a/onionr/core.py b/onionr/core.py index c6495f2f..9d0b831d 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -52,6 +52,8 @@ class Core: self.usageFile = 'data/disk-usage.txt' self.config = config + self.maxBlockSize = 10000000 # max block size in bytes + if not os.path.exists('data/'): os.mkdir('data/') if not os.path.exists('data/blocks/'): diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py index b88bc4f5..4468dacc 100644 --- a/onionr/storagecounter.py +++ b/onionr/storagecounter.py @@ -27,7 +27,7 @@ class StorageCounter: def isFull(self): retData = False - if self._core.config.get('allocations.disk') <= (self.getAmount() + 500): + if self._core.config.get('allocations.disk') <= (self.getAmount() + 1000): retData = True return retData From b6c015255deecec4525898d168cb1380c6eea0f6 Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Fri, 24 Aug 2018 18:57:53 -0500 Subject: [PATCH 13/14] do not use self in peercleanup --- onionr/onionrpeers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onionr/onionrpeers.py b/onionr/onionrpeers.py index 82224d0a..a1f4aa8b 100644 --- 
a/onionr/onionrpeers.py +++ b/onionr/onionrpeers.py @@ -90,7 +90,7 @@ def peerCleanup(coreInst): if PeerProfiles(address, coreInst).score < minScore: coreInst.removeAddress(address) try: - if (self.coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600: + if (coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600: expireTime = 600 else: expireTime = 86400 From 7a7b62725958d32be04ea1d2a374dc0ef49f839d Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Sat, 25 Aug 2018 09:33:38 -0500 Subject: [PATCH 14/14] use config for some timers --- onionr/communicator2.py | 4 ++-- onionr/static-data/default_config.json | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/onionr/communicator2.py b/onionr/communicator2.py index df428c03..1c8329da 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -86,8 +86,8 @@ class OnionrCommunicatorDaemon: OnionrCommunicatorTimers(self, self.daemonCommands, 5) OnionrCommunicatorTimers(self, self.detectAPICrash, 5) peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60) - OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1) - OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True) + OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1) + OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True) OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65) OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 9fba656b..6e35245e 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -60,5 +60,9 @@ "minimumScore": -100, "maxStoredPeers": 5000, "maxConnect": 5 + }, + "timers":{ + "lookupBlocks": 25, + "getBlocks": 30 } }
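For reference, once the whole series is applied the configuration keys it touches default to the following values in static-data/default_config.json; allocations.disk is the byte budget enforced by StorageCounter, and the timers entries feed the lookupBlocks and getBlocks timer frequencies directly:

    "allocations": {
        "disk": 10000000000,
        "netTotal": 1000000000,
        "blockCache": 5000000,
        "blockCacheTotal": 50000000
    },
    "peers": {
        "minimumScore": -100,
        "maxStoredPeers": 5000,
        "maxConnect": 5
    },
    "timers": {
        "lookupBlocks": 25,
        "getBlocks": 30
    }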