Merge branch 'config-all-the-things' into 'master'

Configuration for more things

See merge request beardog/Onionr!8

commit f15d00f61f
@@ -39,8 +39,12 @@ def importBlockFromData(content, coreInst):
     if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid
         if coreInst._crypto.verifyPow(content): # check if POW is enough/correct
             logger.info('Block passed proof, saving.')
-            blockHash = coreInst.setData(content)
-            coreInst.addToBlockDB(blockHash, dataSaved=True)
-            coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
-            retData = True
+            try:
+                blockHash = coreInst.setData(content)
+            except onionrexceptions.DiskAllocationReached:
+                pass
+            else:
+                coreInst.addToBlockDB(blockHash, dataSaved=True)
+                coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+                retData = True
     return retData
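The new try/except/else keeps the import path from crashing when the disk allocation is exhausted: the DB bookkeeping only runs when setData succeeds. A minimal, self-contained sketch of the same control flow, with stand-ins for setData and the exception (the real ones live in Core and onionrexceptions):

class DiskAllocationReached(Exception):
    '''Stand-in for onionrexceptions.DiskAllocationReached.'''

def set_data(content, disk_free):
    # Stand-in for Core.setData: refuse writes past the allocation
    if len(content) > disk_free:
        raise DiskAllocationReached
    return 'somehash'

def import_block(content, disk_free=100):
    ret_data = False
    try:
        block_hash = set_data(content, disk_free)
    except DiskAllocationReached:
        pass                      # drop the block; caller sees False
    else:
        # runs only when no exception was raised
        ret_data = True           # stands in for the DB bookkeeping
    return ret_data

assert import_block(b'x' * 10) is True
assert import_block(b'x' * 500) is False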
@@ -27,6 +27,8 @@ from defusedxml import minidom
 class OnionrCommunicatorDaemon:
     def __init__(self, debug, developmentMode):
 
+        self.isOnline = True # Assume we're connected to the internet
+
         # list of timer instances
         self.timers = []
 
@@ -78,21 +80,19 @@ class OnionrCommunicatorDaemon:
         if debug or developmentMode:
             OnionrCommunicatorTimers(self, self.heartbeat, 10)
 
-        # Print nice header thing :)
-        if config.get('general.display_header', True) and not self.shutdown:
-            self.header()
-
         # Set timers, function reference, seconds
         # requiresPeer True means the timer function won't fire if we have no connected peers
         # TODO: make some of these timer counts configurable
         OnionrCommunicatorTimers(self, self.daemonCommands, 5)
         OnionrCommunicatorTimers(self, self.detectAPICrash, 5)
         peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60)
-        OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1)
-        OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True)
+        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
+        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True)
         OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
+        OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
         OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
+        netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
         announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1)
         cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
 
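lookupBlocks and getBlocks now read their intervals from config instead of the hard-coded 7 and 10 seconds. The dotted keys suggest a nested-JSON lookup; a toy version of that resolution (the real Onionr config module may implement it differently):

import json

defaults = json.loads('{"timers": {"lookupBlocks": 25, "getBlocks": 30}}')

def config_get(key, default=None, conf=defaults):
    node = conf
    for part in key.split('.'):   # one dict level per dotted segment
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

print(config_get('timers.lookupBlocks'))   # 25
print(config_get('timers.getBlocks'))      # 30
print(config_get('timers.missing', 60))    # 60, the fallback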
@@ -114,14 +114,14 @@ class OnionrCommunicatorDaemon:
                 pass
 
         logger.info('Goodbye.')
-        self._core._utils.localCommand('shutdown')
+        self._core._utils.localCommand('shutdown') # shutdown the api
         time.sleep(0.5)
 
     def lookupKeys(self):
         '''Lookup new keys'''
         logger.debug('Looking up new keys...')
         tryAmount = 1
-        for i in range(tryAmount):
+        for i in range(tryAmount): # amount of times to ask peers for new keys
             # Download new key list from random online peers
             peer = self.pickOnlinePeer()
             newKeys = self.peerAction(peer, action='kex')
@@ -148,6 +148,12 @@ class OnionrCommunicatorDaemon:
         existingBlocks = self._core.getBlockList()
         triedPeers = [] # list of peers we've tried this time around
         for i in range(tryAmount):
+            # check if disk allocation is used
+            if not self.isOnline:
+                break
+            if self._core._utils.storageCounter.isFull():
+                logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
+                break
             peer = self.pickOnlinePeer() # select random online peer
             # if we've already tried all the online peers this time around, stop
             if peer in triedPeers:
@@ -162,7 +168,7 @@ class OnionrCommunicatorDaemon:
             if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
                 self._core.setAddressInfo(peer, 'DBHash', newDBHash)
                 try:
-                    newBlocks = self.peerAction(peer, 'getBlockHashes')
+                    newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes
                 except Exception as error:
                     logger.warn("could not get new blocks with " + peer, error=error)
                     newBlocks = False
@@ -174,15 +180,18 @@ class OnionrCommunicatorDaemon:
                     if not i in existingBlocks:
                         # if block does not exist on disk and is not already in block queue
                         if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i):
-                            self.blockQueue.append(i)
+                            self.blockQueue.append(i) # add blocks to download queue
         self.decrementThreadCount('lookupBlocks')
         return
 
     def getBlocks(self):
         '''download new blocks in queue'''
         for blockHash in self.blockQueue:
-            if self.shutdown:
+            removeFromQueue = True
+            if self.shutdown or not self.isOnline:
+                # Exit loop if shutting down or offline
                 break
+            # Do not download blocks being downloaded or that are already saved (edge cases)
             if blockHash in self.currentDownloading:
                 logger.debug('ALREADY DOWNLOADING ' + blockHash)
                 continue
@@ -190,7 +199,11 @@ class OnionrCommunicatorDaemon:
                 logger.debug('%s is already saved' % (blockHash,))
                 self.blockQueue.remove(blockHash)
                 continue
-            self.currentDownloading.append(blockHash)
+            if self._core._blacklist.inBlacklist(blockHash):
+                continue
+            if self._core._utils.storageCounter.isFull():
+                break
+            self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
             logger.info("Attempting to download %s..." % blockHash)
             peerUsed = self.pickOnlinePeer()
             content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata)
@@ -212,10 +225,15 @@ class OnionrCommunicatorDaemon:
                 #meta = metas[1]
                 if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
                     if self._core._crypto.verifyPow(content): # check if POW is enough/correct
-                        logger.info('Block passed proof, saving.')
-                        self._core.setData(content)
-                        self._core.addToBlockDB(blockHash, dataSaved=True)
-                        self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+                        logger.info('Block passed proof, attempting save.')
+                        try:
+                            self._core.setData(content)
+                        except onionrexceptions.DiskAllocationReached:
+                            logger.error("Reached disk allocation allowance, cannot save this block.")
+                            removeFromQueue = False
+                        else:
+                            self._core.addToBlockDB(blockHash, dataSaved=True)
+                            self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
                     else:
                         logger.warn('POW failed for block ' + blockHash)
                 else:
@@ -234,7 +252,8 @@ class OnionrCommunicatorDaemon:
                     # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                     onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
                     logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
-            self.blockQueue.remove(blockHash) # remove from block queue both if success or false
+            if removeFromQueue:
+                self.blockQueue.remove(blockHash) # remove from block queue both if success or false
             self.currentDownloading.remove(blockHash)
         self.decrementThreadCount('getBlocks')
         return
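Together these two hunks change the queue bookkeeping: removeFromQueue starts True for every block and only flips to False when the save hits DiskAllocationReached, so that block stays queued for a retry once space frees up. A compact sketch of that retention logic (save and the exception are stand-ins):

class DiskAllocationReached(Exception):
    pass

def drain_queue(queue, save):
    for block_hash in list(queue):       # copy: we mutate queue while iterating
        remove_from_queue = True
        try:
            save(block_hash)
        except DiskAllocationReached:
            remove_from_queue = False    # keep it queued for the next pass
        if remove_from_queue:
            queue.remove(block_hash)

queue = ['blockA', 'blockB']
def save(block_hash):
    if block_hash == 'blockB':
        raise DiskAllocationReached

drain_queue(queue, save)
print(queue)   # ['blockB'] -- retried on the next run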
@@ -476,13 +495,6 @@ class OnionrCommunicatorDaemon:
             self.shutdown = True
         self.decrementThreadCount('detectAPICrash')
 
-    def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
-        if os.path.exists('static-data/header.txt'):
-            with open('static-data/header.txt', 'rb') as file:
-                # only to stdout, not file or log or anything
-                sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION))
-        logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
-
 class OnionrCommunicatorTimers:
     def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False):
         self.timerFunction = timerFunction
@@ -50,6 +50,9 @@ class Core:
             self.dbCreate = dbcreator.DBCreator(self)
 
             self.usageFile = 'data/disk-usage.txt'
+            self.config = config
+
+            self.maxBlockSize = 10000000 # max block size in bytes
 
             if not os.path.exists('data/'):
                 os.mkdir('data/')
@@ -174,6 +177,8 @@ class Core:
     def removeBlock(self, block):
         '''
             remove a block from this node (does not automatically blacklist)
+
+            **You may want blacklist.addToDB(blockHash)
         '''
         if self._utils.validateHash(block):
             conn = sqlite3.connect(self.blockDB)
@@ -182,8 +187,16 @@ class Core:
             c.execute('Delete from hashes where hash=?;', t)
             conn.commit()
             conn.close()
+            blockFile = 'data/blocks/' + block + '.dat'
+            dataSize = 0
             try:
-                os.remove('data/blocks/' + block + '.dat')
+                ''' Get size of data when loaded as an object/var, rather than on disk,
+                    to avoid conflict with getsizeof when saving blocks
+                '''
+                with open(blockFile, 'r') as data:
+                    dataSize = sys.getsizeof(data.read())
+                self._utils.storageCounter.removeBytes(dataSize)
+                os.remove(blockFile)
             except FileNotFoundError:
                 pass
 
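The docstring above explains the size bookkeeping: blocks are measured with sys.getsizeof on the loaded string, not with the on-disk file size, so that removeBytes aims to undo exactly what addBytes recorded when the block was saved. The two measures differ by the Python object header, as a quick check shows (exact numbers vary by Python version and build):

import sys

payload = 'a' * 1000
print(len(payload))                     # 1000 characters of content
print(sys.getsizeof(payload))           # ~1049: includes the str object header
print(sys.getsizeof(payload.encode()))  # ~1033: bytes overhead differs again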
@@ -256,6 +269,8 @@ class Core:
             Set the data assciated with a hash
         '''
         data = data
+        dataSize = sys.getsizeof(data)
+
         if not type(data) is bytes:
             data = data.encode()
 
@@ -268,15 +283,19 @@ class Core:
             pass # TODO: properly check if block is already saved elsewhere
             #raise Exception("Data is already set for " + dataHash)
         else:
-            blockFile = open(blockFileName, 'wb')
-            blockFile.write(data)
-            blockFile.close()
-            conn = sqlite3.connect(self.blockDB)
-            c = conn.cursor()
-            c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
-            conn.commit()
-            conn.close()
+            if self._utils.storageCounter.addBytes(dataSize) != False:
+                blockFile = open(blockFileName, 'wb')
+                blockFile.write(data)
+                blockFile.close()
+                conn = sqlite3.connect(self.blockDB)
+                c = conn.cursor()
+                c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
+                conn.commit()
+                conn.close()
+                with open(self.dataNonceFile, 'a') as nonceFile:
+                    nonceFile.write(dataHash + '\n')
+            else:
+                raise onionrexceptions.DiskAllocationReached
 
         return dataHash
 
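setData now only writes the block (and records its nonce) when StorageCounter.addBytes accepts the growth; addBytes returns False when the addition would exceed allocations.disk, and that False triggers DiskAllocationReached. A minimal sketch of the guard, with the counter and exception as stand-ins:

class DiskAllocationReached(Exception):
    pass

def set_data_guarded(data, add_bytes, written):
    data_size = len(data)               # the commit measures with sys.getsizeof(data)
    if add_bytes(data_size) != False:   # counter returns False when over allocation
        written.append(data)            # stands in for the file + DB writes
        return 'somehash'
    raise DiskAllocationReached

written = []
print(set_data_guarded(b'abc', lambda n: n, written))   # saved, returns hash
try:
    set_data_guarded(b'abc', lambda n: False, written)
except DiskAllocationReached:
    print('allocation reached, nothing written')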
@@ -539,7 +558,7 @@ class Core:
         if unsaved:
             execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();'
         else:
-            execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;'
+            execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;'
         rows = list()
         for row in c.execute(execute):
             for i in row:
@@ -588,6 +588,9 @@ class Onionr:
             time.sleep(1)
             #TODO make runable on windows
             subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)])
+            # Print nice header thing :)
+            if config.get('general.display_header', True):
+                self.header()
             logger.debug('Started communicator')
             events.event('daemon_start', onionr = self)
         try:
@@ -759,5 +762,12 @@ class Onionr:
         print('Opening %s ...' % url)
         webbrowser.open(url, new = 1, autoraise = True)
 
+    def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
+        if os.path.exists('static-data/header.txt'):
+            with open('static-data/header.txt', 'rb') as file:
+                # only to stdout, not file or log or anything
+                sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', ONIONR_VERSION))
+        logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
+
 if __name__ == "__main__":
     Onionr()
@@ -53,4 +53,21 @@ class DaemonTools:
         if self.daemon._core._utils.doPostRequest(url, data) == 'Success':
             retData = True
         self.daemon.decrementThreadCount('announceNode')
         return retData
+
+    def netCheck(self):
+        '''Check if we are connected to the internet or not when we can't connect to any peers'''
+        if len(self.daemon.onlinePeers) != 0:
+            if not self.daemon._core._utils.checkNetwork(torPort=self.daemon.proxyPort):
+                logger.warn('Network check failed, are you connected to the internet?')
+                self.daemon.isOnline = False
+        self.daemon.decrementThreadCount('netCheck')
+
+    def cleanOldBlocks(self):
+        '''Delete old blocks if our disk allocation is full/near full'''
+        while self.daemon._core._utils.storageCounter.isFull():
+            oldest = self.daemon._core.getBlockList()[0]
+            self.daemon._core._blacklist.addToDB(oldest)
+            self.daemon._core.removeBlock(oldest)
+            logger.info('Deleted block: %s' % (oldest,))
+        self.daemon.decrementThreadCount('cleanOldBlocks')
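cleanOldBlocks pairs with the ORDER BY dateReceived ASC change in Core.getBlockList above: with ascending order, getBlockList()[0] is the oldest block, so the loop evicts oldest-first until the counter reports free space. A sketch of that eviction against a fake store:

class FakeStore:
    '''Stand-in for Core + StorageCounter; holds blocks oldest-first.'''
    def __init__(self, blocks, capacity):
        self.blocks = blocks
        self.capacity = capacity

    def is_full(self):
        return len(self.blocks) >= self.capacity

    def remove_oldest(self):
        return self.blocks.pop(0)   # index 0 is oldest, thanks to ASC ordering

store = FakeStore(['b1', 'b2', 'b3', 'b4'], capacity=3)
while store.is_full():
    print('Deleted block: %s' % store.remove_oldest())
print(store.blocks)   # ['b3', 'b4']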
@@ -58,3 +58,8 @@ class MissingPort(Exception):
 
 class InvalidAddress(Exception):
     pass
+
+# file exceptions
+
+class DiskAllocationReached(Exception):
+    pass
@@ -90,7 +90,11 @@ def peerCleanup(coreInst):
         if PeerProfiles(address, coreInst).score < minScore:
             coreInst.removeAddress(address)
             try:
-                coreInst._blacklist.addToDB(address, dataType=1, expire=300)
+                if (coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600:
+                    expireTime = 600
+                else:
+                    expireTime = 86400
+                coreInst._blacklist.addToDB(address, dataType=1, expire=expireTime)
             except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue
                 pass
             logger.warn('Removed address ' + address + '.')
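Blacklisting a pruned peer now uses a variable expiry instead of the flat 300 seconds: column 4 of the peer info (compared against the current epoch, so presumably a last-seen timestamp) selects 600 seconds for peers already silent for ten minutes or more, and a full day otherwise. The decision in isolation:

def blacklist_expiry(now, last_seen):
    # peers silent >= 600 s get a short entry; recently-seen peers a day
    if (now - last_seen) >= 600:
        return 600
    return 86400

print(blacklist_expiry(now=10000, last_seen=9000))   # 600: silent for 1000 s
print(blacklist_expiry(now=10000, last_seen=9900))   # 86400: seen 100 s ago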
@@ -23,7 +23,7 @@ import nacl.signing, nacl.encoding
 from onionrblockapi import Block
 import onionrexceptions
 from defusedxml import minidom
-import pgpwords
+import pgpwords, storagecounter
 if sys.version_info < (3, 6):
     try:
         import sha3
@@ -40,9 +40,9 @@ class OnionrUtils:
         self._core = coreInstance
 
         self.timingToken = ''
-
         self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions
         self.peerProcessing = {} # dict of current peer actions: peer, actionList
+        self.storageCounter = storagecounter.StorageCounter(self._core)
         config.reload()
         return
 
@@ -131,8 +131,12 @@ class OnionrUtils:
                     if not config.get('tor.v3onions') and len(adder) == 62:
                         continue
                     if self._core.addAddress(adder):
-                        logger.info('Added %s to db.' % adder, timestamp = True)
-                        retVal = True
+                        # Check if we have the maxmium amount of allowed stored peers
+                        if config.get('peers.maxStoredPeers') > len(self._core.listAdders()):
+                            logger.info('Added %s to db.' % adder, timestamp = True)
+                            retVal = True
+                        else:
+                            logger.warn('Reached the maximum amount of peers in the net database as allowed by your config.')
                     else:
                         pass
                         #logger.debug('%s is either our address or already in our DB' % adder)
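The peer database now stops growing at peers.maxStoredPeers. Note the commit checks the cap after addAddress has already run; the sketch below folds the cap check and the insert into one step, which matches the intent if not the exact ordering:

def try_add_peer(adders, adder, max_stored):
    if adder in adders:
        return False                  # already known
    if max_stored > len(adders):      # still room under the configured cap
        adders.append(adder)
        return True
    return False                      # cap reached, refuse

adders = []
print(try_add_peer(adders, 'example1.onion', max_stored=2))  # True
print(try_add_peer(adders, 'example2.onion', max_stored=2))  # True
print(try_add_peer(adders, 'example3.onion', max_stored=2))  # False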
@@ -380,10 +384,6 @@ class OnionrUtils:
                     pass
                 else:
                     retData = True
-                    if retData:
-                        # Executes if data not seen
-                        with open(self._core.dataNonceFile, 'a') as nonceFile:
-                            nonceFile.write(nonce + '\n')
             else:
                 logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object')
 
@@ -630,6 +630,22 @@ class OnionrUtils:
         except AttributeError:
             pass
         return data
+
+    def checkNetwork(self, torPort=0):
+        '''Check if we are connected to the internet (through Tor)'''
+        retData = False
+        connectURLs = []
+        try:
+            with open('static-data/connect-check.txt', 'r') as connectTest:
+                connectURLs = connectTest.read().split(',')
+
+            for url in connectURLs:
+                if self.doGetRequest(url, port=torPort) != False:
+                    retData = True
+                    break
+        except FileNotFoundError:
+            pass
+        return retData
 
 def size(path='.'):
     '''
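checkNetwork reads a comma-separated URL list from static-data/connect-check.txt and declares the node online as soon as any one of them answers over Tor. The same flow with the file and doGetRequest stubbed out:

def check_network(do_get_request, urls_csv):
    # urls_csv mirrors the one-line, comma-separated connect-check.txt
    ret_data = False
    for url in urls_csv.split(','):
        if do_get_request(url) != False:
            ret_data = True           # one success is enough
            break
    return ret_data

urls = 'https://example.com,https://example.org'
print(check_network(lambda url: 'response body', urls))  # True
print(check_network(lambda url: False, urls))            # False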
@@ -655,4 +671,4 @@ def humanSize(num, suffix='B'):
         if abs(num) < 1024.0:
             return "%.1f %s%s" % (num, unit, suffix)
         num /= 1024.0
     return "%.1f %s%s" % (num, 'Yi', suffix)
@@ -51,14 +51,18 @@
     },
 
     "allocations":{
-        "disk": 9000000000,
+        "disk": 10000000000,
         "netTotal": 1000000000,
         "blockCache": 5000000,
         "blockCacheTotal": 50000000
     },
     "peers":{
         "minimumScore": -100,
-        "maxStoredPeers": 500,
+        "maxStoredPeers": 5000,
         "maxConnect": 5
+    },
+    "timers":{
+        "lookupBlocks": 25,
+        "getBlocks": 30
     }
 }
onionr/storagecounter.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+'''
+    Onionr - P2P Microblogging Platform & Social network.
+
+    Keeps track of how much disk space we're using
+'''
+'''
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import config
+
+class StorageCounter:
+    def __init__(self, coreInst):
+        self._core = coreInst
+        self.dataFile = self._core.usageFile
+        return
+
+    def isFull(self):
+        retData = False
+        if self._core.config.get('allocations.disk') <= (self.getAmount() + 1000):
+            retData = True
+        return retData
+
+    def _update(self, data):
+        with open(self.dataFile, 'w') as dataFile:
+            dataFile.write(str(data))
+
+    def getAmount(self):
+        '''Return how much disk space we're using (according to record)'''
+        retData = 0
+        try:
+            with open(self.dataFile, 'r') as dataFile:
+                retData = int(dataFile.read())
+        except FileNotFoundError:
+            pass
+        return retData
+
+    def addBytes(self, amount):
+        '''Record that we are now using more disk space, unless doing so would exceed configured max'''
+        newAmount = amount + self.getAmount()
+        retData = newAmount
+        if newAmount > self._core.config.get('allocations.disk'):
+            retData = False
+        else:
+            self._update(newAmount)
+        return retData
+
+    def removeBytes(self, amount):
+        '''Record that we are now using less disk space'''
+        newAmount = self.getAmount() - amount
+        self._update(newAmount)
+        return newAmount
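A usage sketch of the new counter's accounting, with the persisted data/disk-usage.txt record replaced by a plain variable (the numbers are the updated defaults):

allocation = 10000000000         # allocations.disk in the new default config
used = 0

def add_bytes(amount):
    global used
    new_amount = used + amount
    if new_amount > allocation:  # would exceed the allocation: refuse
        return False
    used = new_amount
    return new_amount

def is_full():
    # isFull keeps ~1000 bytes of headroom under the allocation
    return allocation <= used + 1000

print(add_bytes(500))            # 500
print(is_full())                 # False
print(add_bytes(allocation))     # False: refused, nothing recorded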