sync improvements, bug fixes, config changes

commit e34c08b036
parent 6d31fa4229
@@ -80,10 +80,6 @@ class OnionrCommunicatorDaemon:
         if debug or developmentMode:
             OnionrCommunicatorTimers(self, self.heartbeat, 10)
 
-        # Print nice header thing :)
-        if config.get('general.display_header', True) and not self.shutdown:
-            self.header()
-
         # Set timers, function reference, seconds
         # requiresPeer True means the timer function won't fire if we have no connected peers
         # TODO: make some of these timer counts configurable
@@ -93,6 +89,7 @@ class OnionrCommunicatorDaemon:
         OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1)
         OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
+        OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 650)
         OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
         netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
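The new cleanOldBlocks timer fires every 650 seconds through the same OnionrCommunicatorTimers mechanism as the other maintenance tasks. Only the constructor signature of that class appears in this diff, so the following is a minimal, hypothetical sketch of the general pattern it names (a re-arming background timer), not the project's implementation:

```python
import threading

class RepeatingTimer:
    '''Illustrative re-arming timer: run timerFunction every frequency
    seconds on a background thread until stop() is called.
    (Sketch only; not the project's OnionrCommunicatorTimers.)'''
    def __init__(self, timerFunction, frequency):
        self.timerFunction = timerFunction
        self.frequency = frequency
        self._timer = None
        self._stopped = False
        self._schedule()

    def _schedule(self):
        if not self._stopped:
            self._timer = threading.Timer(self.frequency, self._run)
            self._timer.daemon = True
            self._timer.start()

    def _run(self):
        self.timerFunction()  # do the periodic work
        self._schedule()      # re-arm for the next interval

    def stop(self):
        self._stopped = True
        if self._timer is not None:
            self._timer.cancel()

# usage: t = RepeatingTimer(lambda: print('tick'), 650); ...; t.stop()
```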
@@ -152,6 +149,8 @@ class OnionrCommunicatorDaemon:
         triedPeers = [] # list of peers we've tried this time around
         for i in range(tryAmount):
             # check if disk allocation is used
+            if not self.isOnline:
+                break
             if self._core._utils.storageCounter.isFull():
                 logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used')
                 break
@@ -188,8 +187,8 @@ class OnionrCommunicatorDaemon:
     def getBlocks(self):
         '''download new blocks in queue'''
         for blockHash in self.blockQueue:
-            if self.shutdown:
-                # Exit loop if shutting down
+            if self.shutdown or not self.isOnline:
+                # Exit loop if shutting down or offline
                 break
             # Do not download blocks being downloaded or that are already saved (edge cases)
             if blockHash in self.currentDownloading:
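Both the lookup and download loops now bail out when the daemon reports itself offline, not only on shutdown. A tiny sketch of that early-exit guard (function names here are stand-ins; the real checks are self.shutdown and self.isOnline on the daemon):

```python
def drain_queue(queue, is_shutdown, is_online):
    '''Process queued items, stopping early on shutdown or loss of connectivity.'''
    processed = []
    for item in queue:
        if is_shutdown() or not is_online():
            break  # exit loop if shutting down or offline
        processed.append(item)
    return processed

print(drain_queue(['a', 'b'], lambda: False, lambda: True))   # ['a', 'b']
print(drain_queue(['a', 'b'], lambda: False, lambda: False))  # []
```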
@@ -221,11 +220,11 @@ class OnionrCommunicatorDaemon:
                     #meta = metas[1]
                     if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
                         if self._core._crypto.verifyPow(content): # check if POW is enough/correct
-                            logger.info('Block passed proof, saving.')
+                            logger.info('Block passed proof, attemping save.')
                             try:
                                 self._core.setData(content)
                             except onionrexceptions.DiskAllocationReached:
-                                logger.error("Reached disk allocation allowance, cannot save additional blocks.")
+                                logger.error("Reached disk allocation allowance, cannot save this block.")
                             else:
                                 self._core.addToBlockDB(blockHash, dataSaved=True)
                                 self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
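The save path relies on try/except/else: the block is only registered in the block database when setData did not raise DiskAllocationReached. A self-contained sketch of that control flow, with stand-in store/limit names:

```python
class DiskAllocationReached(Exception):
    '''Raised when a save would exceed the configured disk allocation.'''

def set_data(store, content, limit=1000):
    '''Save content unless it would push total stored bytes past limit.'''
    if sum(len(v) for v in store.values()) + len(content) > limit:
        raise DiskAllocationReached
    store[len(store)] = content

def handle_block(store, db, block_hash, content):
    try:
        set_data(store, content)
    except DiskAllocationReached:
        print('Reached disk allocation allowance, cannot save this block.')
    else:
        # runs only when set_data did not raise: mark the block as saved
        db[block_hash] = True

store, db = {}, {}
handle_block(store, db, 'abc123', b'hello world')
print(db)  # {'abc123': True}
```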
@@ -489,13 +488,6 @@ class OnionrCommunicatorDaemon:
             self.shutdown = True
         self.decrementThreadCount('detectAPICrash')
 
-    def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
-        if os.path.exists('static-data/header.txt'):
-            with open('static-data/header.txt', 'rb') as file:
-                # only to stdout, not file or log or anything
-                sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION))
-        logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
-
 class OnionrCommunicatorTimers:
     def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False):
         self.timerFunction = timerFunction
@@ -183,8 +183,16 @@ class Core:
             c.execute('Delete from hashes where hash=?;', t)
             conn.commit()
             conn.close()
+            blockFile = 'data/blocks/' + block + '.dat'
+            dataSize = 0
             try:
-                os.remove('data/blocks/' + block + '.dat')
+                ''' Get size of data when loaded as an object/var, rather than on disk,
+                    to avoid conflict with getsizeof when saving blocks
+                '''
+                with open(blockFile, 'r') as data:
+                    dataSize = sys.getsizeof(data.read())
+                self._utils.storageCounter.removeBytes(dataSize)
+                os.remove(blockFile)
             except FileNotFoundError:
                 pass
 
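removeBlock now credits the storage counter with sys.getsizeof() of the file's contents loaded into memory, rather than the on-disk byte count, so the amount removed matches the measure used when the block was saved (as the added comment notes). The two measures differ; a small sketch showing the gap, using a temporary file for illustration:

```python
import os
import sys
import tempfile

# write a small sample "block" to a temporary file
with tempfile.NamedTemporaryFile('w', suffix='.dat', delete=False) as f:
    f.write('x' * 100)
    path = f.name

on_disk = os.path.getsize(path)          # raw bytes on disk: 100
with open(path, 'r') as data:
    loaded = sys.getsizeof(data.read())  # str object size: 100 + object overhead

print(on_disk, loaded)  # the counter must use the same measure on add and remove
os.remove(path)
```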
@@ -280,6 +288,8 @@ class Core:
            c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
            conn.commit()
            conn.close()
+            with open(self.dataNonceFile, 'a') as nonceFile:
+                nonceFile.write(dataHash + '\n')
        else:
            raise onionrexceptions.DiskAllocationReached
 
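setData now appends the block's hash to dataNonceFile once the data is actually stored; the corresponding write that used to happen at validation time is removed further down in this commit. A sketch of the append-on-save / check-on-validate idea, with a hypothetical file path standing in for self.dataNonceFile:

```python
import os

NONCE_FILE = 'data-nonce.txt'  # hypothetical path; the project uses self.dataNonceFile

def already_seen(nonce):
    '''Return True if this nonce/hash was recorded by an earlier save.'''
    if not os.path.exists(NONCE_FILE):
        return False
    with open(NONCE_FILE, 'r') as nonce_file:
        return nonce in nonce_file.read().split('\n')

def record_saved(data_hash):
    '''Append the hash only after the data has really been written.'''
    with open(NONCE_FILE, 'a') as nonce_file:
        nonce_file.write(data_hash + '\n')

if not already_seen('abc123'):
    record_saved('abc123')
print(already_seen('abc123'))  # True
```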
@@ -544,7 +554,7 @@ class Core:
        if unsaved:
            execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();'
        else:
-            execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;'
+            execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;'
        rows = list()
        for row in c.execute(execute):
            for i in row:
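The block list is now returned oldest-first (ORDER BY dateReceived ASC) instead of newest-first, which is the ordering a cleanup job would want if it drops the oldest blocks first. A self-contained sqlite3 sketch of the two orderings:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE hashes (hash TEXT, dateReceived INTEGER, dataSaved INTEGER)')
c.executemany('INSERT INTO hashes VALUES (?, ?, ?)',
              [('old-block', 100, 1), ('mid-block', 200, 1), ('new-block', 300, 1)])

newest_first = [r[0] for r in c.execute(
    'SELECT hash FROM hashes ORDER BY dateReceived DESC;')]
oldest_first = [r[0] for r in c.execute(
    'SELECT hash FROM hashes ORDER BY dateReceived ASC;')]

print(newest_first)  # ['new-block', 'mid-block', 'old-block']
print(oldest_first)  # ['old-block', 'mid-block', 'new-block']
conn.close()
```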
@@ -588,6 +588,9 @@ class Onionr:
            time.sleep(1)
        #TODO make runable on windows
        subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)])
+        # Print nice header thing :)
+        if config.get('general.display_header', True):
+            self.header()
        logger.debug('Started communicator')
        events.event('daemon_start', onionr = self)
        try:
@@ -759,5 +762,12 @@ class Onionr:
        print('Opening %s ...' % url)
        webbrowser.open(url, new = 1, autoraise = True)
 
+    def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
+        if os.path.exists('static-data/header.txt'):
+            with open('static-data/header.txt', 'rb') as file:
+                # only to stdout, not file or log or anything
+                sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', ONIONR_VERSION))
+        logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
+
 if __name__ == "__main__":
     Onionr()
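The header() method, moved here from the communicator daemon, reads static-data/header.txt and swaps single-letter tokens (P, W, G, B, V) for color codes and the version string before printing. A standalone sketch of that token-substitution idea, with made-up escape codes and a placeholder version in place of logger.colors and ONIONR_VERSION:

```python
import sys

# made-up stand-ins for logger.colors.* and ONIONR_VERSION
PINK, GREEN, BOLD, RESET = '\033[95m', '\033[92m', '\033[1m', '\033[0m'
VERSION = '0.0.0'

def print_header(template):
    '''Replace single-letter placeholders with colors/version, then print.'''
    rendered = (template.replace('P', PINK)
                        .replace('W', RESET + BOLD)
                        .replace('G', GREEN)
                        .replace('\n', RESET + '\n')
                        .replace('B', BOLD)
                        .replace('V', VERSION))
    sys.stderr.write(rendered + RESET + '\n')

print_header('POnionr WvV Gready\n')
```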
@@ -61,5 +61,11 @@ class DaemonTools:
            if not self.daemon._core._utils.checkNetwork():
                logger.warn('Network check failed, are you connected to the internet?')
                self.daemon.isOnline = False
 
        self.daemon.decrementThreadCount('netCheck')
+
+    def cleanOldBlocks(self):
+        '''Delete old blocks if our disk allocation is full/near full'''
+        if self.daemon._core._utils.storageCounter.isFull():
+
+        self.daemon.decrementThreadCount('cleanOldBlocks')
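The body of cleanOldBlocks beyond the isFull() check is not visible in this hunk. Purely as an illustration (hypothetical, not the project's code), a cleanup routine of this kind could walk the oldest-first block list from the Core hunk above and delete blocks until usage drops back under the allocation:

```python
def clean_old_blocks(list_oldest_first, remove_block, is_full):
    '''Hypothetical cleanup loop: while the allocation is exceeded,
    delete the oldest known block and re-check.'''
    removed = []
    for block_hash in list_oldest_first():
        if not is_full():
            break
        remove_block(block_hash)  # removal should also credit the storage counter
        removed.append(block_hash)
    return removed

# usage sketch: clean_old_blocks(core.getBlockList-like callable,
#                                core.removeBlock-like callable,
#                                storage_counter.isFull-like callable)
```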
@@ -61,5 +61,5 @@ class InvalidAddress(Exception):
 
 # file exceptions
 
-class DiskAllocationReached:
+class DiskAllocationReached(Exception):
     pass
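Making DiskAllocationReached inherit from Exception is what allows the raise in Core.setData and the except clause in the communicator to work at all; in Python 3, raising a class that does not derive from BaseException fails with a TypeError. A quick demonstration:

```python
class BadError:              # old form: not an exception type
    pass

class GoodError(Exception):  # fixed form
    pass

try:
    raise BadError
except TypeError as e:
    print('cannot raise it:', e)  # exceptions must derive from BaseException

try:
    raise GoodError
except GoodError:
    print('caught GoodError as expected')
```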
@@ -384,10 +384,6 @@ class OnionrUtils:
                pass
            else:
                retData = True
-            if retData:
-                # Executes if data not seen
-                with open(self._core.dataNonceFile, 'a') as nonceFile:
-                    nonceFile.write(nonce + '\n')
        else:
            logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object')
 
@@ -51,7 +51,7 @@
     },
 
     "allocations":{
-        "disk": 9000000000,
+        "disk": 800,
         "netTotal": 1000000000,
         "blockCache": 5000000,
         "blockCacheTotal": 50000000
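The default disk allocation drops from 9000000000 to 800 bytes, presumably to exercise the new allocation handling. Elsewhere in the diff this value is read with config.get('allocations.disk'); a minimal sketch of that kind of dotted-key lookup over the nested JSON (the fallback behaviour here is an assumption, not taken from the project's config module):

```python
import json

config_json = '''
{
    "allocations": {
        "disk": 800,
        "netTotal": 1000000000,
        "blockCache": 5000000,
        "blockCacheTotal": 50000000
    }
}
'''

def config_get(data, dotted_key, default=None):
    '''Walk a nested dict with a dotted key like "allocations.disk".'''
    node = data
    for part in dotted_key.split('.'):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

config = json.loads(config_json)
print(config_get(config, 'allocations.disk'))               # 800
print(config_get(config, 'general.display_header', True))   # True (default)
```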
@@ -27,7 +27,7 @@ class StorageCounter:
 
    def isFull(self):
        retData = False
-        if self._core.config.get('allocations.disk') <= self.getAmount():
+        if self._core.config.get('allocations.disk') <= (self.getAmount() + 100):
            retData = True
        return retData
 
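isFull() now reports full once usage comes within 100 bytes of the allocation instead of only at or above it, presumably to leave headroom before the hard limit is hit. A small sketch of a counter with that safety margin (class and field names are illustrative, not the project's StorageCounter):

```python
class StorageCounterSketch:
    '''Illustrative storage counter with a fixed safety margin.'''
    MARGIN = 100  # bytes of headroom, as in the diff above

    def __init__(self, allocation):
        self.allocation = allocation  # allocations.disk from config
        self.amount = 0

    def add_bytes(self, n):
        self.amount += n

    def remove_bytes(self, n):
        self.amount = max(0, self.amount - n)

    def is_full(self):
        return self.allocation <= (self.amount + self.MARGIN)

counter = StorageCounterSketch(800)
counter.add_bytes(650)
print(counter.is_full())  # False: 800 > 650 + 100
counter.add_bytes(60)
print(counter.is_full())  # True: 800 <= 710 + 100
```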