Arinerron 2018-09-24 16:48:00 -07:00
parent 7738de1c28
commit 6e55636e78
9 changed files with 72 additions and 59 deletions

View File

@@ -150,14 +150,17 @@ class API:
             self.validateHost('private')
+            if config.get('www.public.guess_mime', True):
+                self.mimeType = API.guessMime(path)
             endTime = math.floor(time.time())
             elapsed = endTime - startTime
             if not hmac.compare_digest(timingToken, self.timeBypassToken):
-                if elapsed < self._privateDelayTime:
+                if (elapsed < self._privateDelayTime) and config.get('www.private.timing_protection', True):
                     time.sleep(self._privateDelayTime - elapsed)
-            return send_from_directory('static-data/www/private/', path)
+            return send_from_directory(config.get('www.private.path', 'static-data/www/private/'), path)

         @app.route('/www/public/<path:path>')
         def www_public(path):
@@ -166,7 +169,10 @@ class API:
             self.validateHost('public')
-            return send_from_directory('static-data/www/public/', path)
+            if config.get('www.public.guess_mime', True):
+                self.mimeType = API.guessMime(path)
+            return send_from_directory(config.get('www.public.path', 'static-data/www/public/'), path)

         @app.route('/ui/<path:path>')
         def ui_private(path):

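The private and public www routes above stop hard-coding their directories and switch to config.get(key, default), so a missing key falls back to the old behaviour. As a rough illustration only (the _SETTINGS dict, get helper and pad_response_time below are stand-ins, not Onionr's config module or API class), the dotted-key lookup and the timing-protection padding work roughly like this:

import time

# Stand-in settings; Onionr loads these from its JSON config instead.
_SETTINGS = {'www': {'private': {'path': 'static-data/www/private/',
                                 'timing_protection': True}}}

def get(key, default=None, settings=_SETTINGS):
    '''Walk a dot-separated key; return the default if any part is missing.'''
    node = settings
    for part in key.split('.'):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

def pad_response_time(start_time, minimum_seconds=2):
    '''Sleep so a private response never returns faster than a fixed floor,
    unless timing protection has been switched off in the config.'''
    if get('www.private.timing_protection', True):
        elapsed = time.time() - start_time
        if elapsed < minimum_seconds:
            time.sleep(minimum_seconds - elapsed)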
View File

@@ -87,8 +87,8 @@ class OnionrCommunicatorDaemon:
         OnionrCommunicatorTimers(self, self.daemonCommands, 5)
         OnionrCommunicatorTimers(self, self.detectAPICrash, 5)
         peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
-        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
-        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True)
+        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookup_blocks'), requiresPeer=True, maxThreads=1)
+        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.get_blocks'), requiresPeer=True)
         OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
         OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
         OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
@@ -252,7 +252,7 @@ class OnionrCommunicatorDaemon:
             except AttributeError:
                 pass
             # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
             onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
             logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
         if removeFromQueue:
             self.blockQueue.remove(blockHash) # remove from block queue both if success or false
@@ -298,7 +298,7 @@ class OnionrCommunicatorDaemon:
         '''Manages the self.onlinePeers attribute list, connects to more peers if we have none connected'''
         logger.info('Refreshing peer pool.')
-        maxPeers = int(config.get('peers.maxConnect'))
+        maxPeers = int(config.get('peers.max_connect', 10))
         needed = maxPeers - len(self.onlinePeers)

         for i in range(needed):
@@ -331,7 +331,7 @@ class OnionrCommunicatorDaemon:
                 raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
         else:
             peerList = self._core.listAdders()

         peerList = onionrpeers.getScoreSortedPeerList(self._core)

         if len(peerList) == 0 or useBootstrap:
@@ -339,7 +339,7 @@ class OnionrCommunicatorDaemon:
             self.addBootstrapListToPeerList(peerList)

         for address in peerList:
-            if not config.get('tor.v3onions') and len(address) == 62:
+            if not config.get('tor.v3_onions') and len(address) == 62:
                 continue
             if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
                 continue
@@ -352,7 +352,7 @@ class OnionrCommunicatorDaemon:
                 self.onlinePeers.append(address)
                 self.connectTimes[address] = self._core._utils.getEpoch()
                 retData = address

                 # add peer to profile list if they're not in it
                 for profile in self.peerProfiles:
                     if profile.address == address:
@@ -416,7 +416,7 @@ class OnionrCommunicatorDaemon:
             self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch())
             self.getPeerProfileInstance(peer).addScore(1)
         return retData

     def getPeerProfileInstance(self, peer):
         '''Gets a peer profile instance from the list of profiles, by address name'''
         for i in self.peerProfiles:
@@ -543,7 +543,7 @@ class OnionrCommunicatorTimers:
         if self.makeThread:
             for i in range(self.threadAmount):
                 if self.daemonInstance.threadCounts[self.timerFunction.__name__] >= self.maxThreads:
-                    logger.warn(self.timerFunction.__name__ + ' has too many current threads to start anymore.')
+                    logger.warn('%s is currently using the maximum number of threads, not starting another.' % self.timerFunction.__name__)
                 else:
                     self.daemonInstance.threadCounts[self.timerFunction.__name__] += 1
                     newThread = threading.Thread(target=self.timerFunction)

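The timer intervals above now come from timers.lookup_blocks and timers.get_blocks instead of the old camelCase keys. A minimal sketch of the idea, using threading.Timer and a plain dict in place of OnionrCommunicatorTimers and Onionr's config object:

import threading

def run_every(interval_key, func, settings, default_interval=30):
    '''Schedule func after the configured interval, rescheduling it after each run.'''
    interval = settings.get(interval_key, default_interval)

    def _tick():
        func()
        threading.Timer(interval, _tick).start()

    threading.Timer(interval, _tick).start()

# e.g. run_every('lookup_blocks', lookup_blocks, {'lookup_blocks': 25, 'get_blocks': 30})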
View File

@@ -126,7 +126,7 @@ class Core:
         '''
             Add an address to the address database (only tor currently)
         '''
-        if address == config.get('i2p.ownAddr', None):
+        if address == config.get('i2p.own_addr', None):
             return False

         if self._utils.validateID(address):
@@ -190,7 +190,7 @@ class Core:
         blockFile = 'data/blocks/' + block + '.dat'
         dataSize = 0
         try:
             ''' Get size of data when loaded as an object/var, rather than on disk,
                 to avoid conflict with getsizeof when saving blocks
             '''
             with open(blockFile, 'r') as data:
@@ -273,7 +273,7 @@ class Core:
         if not type(data) is bytes:
             data = data.encode()

         dataHash = self._getSha3Hash(data)

         if type(dataHash) is bytes:
@@ -722,7 +722,7 @@ class Core:
         metadata['sig'] = signature
         metadata['signer'] = signer
         metadata['time'] = str(self._utils.getEpoch())

         # send block data (and metadata) to POW module to get tokenized block data
         proof = onionrproofs.POW(metadata, data)
         payload = proof.waitForResult()

View File

@@ -49,7 +49,7 @@ class NetController:
             Generate a torrc file for our tor instance
         '''
         hsVer = '# v2 onions'
-        if config.get('tor.v3onions'):
+        if config.get('tor.v3_onions'):
             hsVer = 'HiddenServiceVersion 3'
             logger.info('Using v3 onions :)')
         if os.path.exists(self.torConfigLocation):

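The torrc generator now reads tor.v3_onions. A sketch of how the hidden-service stanza can be assembled, assuming a plain settings dict and illustrative directory/port values (HiddenServiceDir, HiddenServiceVersion and HiddenServicePort are standard Tor options):

def build_hidden_service_block(settings, hs_dir='data/hs/', virtport=80, target='127.0.0.1:8080'):
    '''Return the hidden-service portion of a torrc, enabling v3 onions if configured.'''
    hs_ver = '# v2 onions'
    if settings.get('tor', {}).get('v3_onions', False):
        hs_ver = 'HiddenServiceVersion 3'
    return '\n'.join(['HiddenServiceDir %s' % hs_dir,
                      hs_ver,
                      'HiddenServicePort %d %s' % (virtport, target)])

# print(build_hidden_service_block({'tor': {'v3_onions': True}}))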
View File

@@ -762,7 +762,7 @@ class Block:
                 return False

             # dump old cached blocks if the size exeeds the maximum
-            if sys.getsizeof(Block.blockCacheOrder) >= config.get('allocations.blockCacheTotal', 50000000): # 50MB default cache size
+            if sys.getsizeof(Block.blockCacheOrder) >= config.get('allocations.block_cache_total', 50000000): # 50MB default cache size
                 del Block.blockCache[blockCacheOrder.pop(0)]

             # cache block content

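The block cache now takes its size ceiling from allocations.block_cache_total (50 MB by default). Roughly, the eviction works like the sketch below; block_cache and block_cache_order are illustrative stand-ins for the Block class attributes, and the size check mirrors the diff's use of sys.getsizeof on the ordering list:

import sys

block_cache = {}        # block hash -> block content
block_cache_order = []  # block hashes, oldest first

def cache_block(block_hash, content, max_cache_bytes=50000000):
    '''Evict the oldest cached blocks until the bookkeeping list fits the allowance.'''
    while block_cache_order and sys.getsizeof(block_cache_order) >= max_cache_bytes:
        oldest = block_cache_order.pop(0)
        del block_cache[oldest]
    block_cache[block_hash] = content
    block_cache_order.append(block_hash)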
View File

@@ -63,14 +63,14 @@ class DaemonTools:
             logger.warn('Network check failed, are you connected to the internet?')
             self.daemon.isOnline = False
         self.daemon.decrementThreadCount('netCheck')

     def cleanOldBlocks(self):
         '''Delete old blocks if our disk allocation is full/near full'''
         while self.daemon._core._utils.storageCounter.isFull():
             oldest = self.daemon._core.getBlockList()[0]
             self.daemon._core._blacklist.addToDB(oldest)
             self.daemon._core.removeBlock(oldest)
             logger.info('Deleted block: %s' % (oldest,))
         self.daemon.decrementThreadCount('cleanOldBlocks')

     def cooldownPeer(self):
@@ -88,7 +88,7 @@ class DaemonTools:
                 del self.daemon.cooldownPeer[peer]

         # Cool down a peer, if we have max connections alive for long enough
-        if onlinePeerAmount >= self.daemon._core.config.get('peers.maxConnect'):
+        if onlinePeerAmount >= self.daemon._core.config.get('peers.max_connect', 10):
             finding = True
             while finding:
                 try:
@@ -102,4 +102,4 @@ class DaemonTools:
                 else:
                     self.daemon.removeOnlinePeer(toCool)
                     self.daemon.cooldownPeer[toCool] = self.daemon._core._utils.getEpoch()
         self.daemon.decrementThreadCount('cooldownPeer')

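cleanOldBlocks, shown in context above, blacklists and deletes the oldest block until the storage counter says the disk allocation fits again, and cooldownPeer only triggers once peers.max_connect connections are alive. The loop reduces to something like this hedged sketch, with the callables standing in for Onionr's core and storage helpers:

def clean_old_blocks(storage_is_full, list_blocks_oldest_first, blacklist_block, remove_block):
    '''Blacklist and delete the oldest block until storage is back under its allocation.'''
    while storage_is_full():
        oldest = list_blocks_oldest_first()[0]
        blacklist_block(oldest)   # keep it from being re-downloaded
        remove_block(oldest)

# clean_old_blocks(counter.isFull, core.getBlockList, blacklist.addToDB, core.removeBlock)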
View File

@@ -44,7 +44,7 @@ class PeerProfiles:
         except (TypeError, ValueError) as e:
             self.success = 0
         self.score = self.success

     def saveScore(self):
         '''Save the node's score to the database'''
         self.coreInst.setAddressInfo(self.address, 'success', self.score)
@@ -79,8 +79,8 @@ def peerCleanup(coreInst):
     logger.info('Cleaning peers...')
     config.reload()
-    minScore = int(config.get('peers.minimumScore'))
-    maxPeers = int(config.get('peers.maxStoredPeers'))
+    minScore = int(config.get('peers.minimum_score', -100))
+    maxPeers = int(config.get('peers.max_stored', 5000))

     adders = getScoreSortedPeerList(coreInst)
     adders.reverse()
@@ -102,4 +102,4 @@ def peerCleanup(coreInst):
             logger.warn('Removed address ' + address + '.')

     # Unban probably not malicious peers TODO improve
     coreInst._blacklist.deleteExpired(dataType=1)

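peerCleanup now falls back to sensible defaults for peers.minimum_score and peers.max_stored. The policy itself is simple; a standalone sketch, with a list of (address, score) tuples standing in for the score-sorted peer list:

def clean_peers(peers_by_score, minimum_score=-100, max_stored=5000):
    '''peers_by_score: (address, score) pairs sorted best-first.
    Drop anything below the score floor, then cap how many peers are kept.'''
    kept = [(address, score) for address, score in peers_by_score if score >= minimum_score]
    return kept[:max_stored]

# clean_peers([('a.onion', 12), ('b.onion', -300)]) -> [('a.onion', 12)]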
View File

@@ -131,11 +131,11 @@ class OnionrUtils:
             for adder in newAdderList.split(','):
                 adder = adder.strip()
                 if not adder in self._core.listAdders(randomOrder = False) and adder != self.getMyAddress() and not self._core._blacklist.inBlacklist(adder):
-                    if not config.get('tor.v3onions') and len(adder) == 62:
+                    if not config.get('tor.v3_onions') and len(adder) == 62:
                         continue
                     if self._core.addAddress(adder):
                         # Check if we have the maxmium amount of allowed stored peers
-                        if config.get('peers.maxStoredPeers') > len(self._core.listAdders()):
+                        if config.get('peers.max_stored') > len(self._core.listAdders()):
                             logger.info('Added %s to db.' % adder, timestamp = True)
                             retVal = True
                         else:
@@ -635,7 +635,7 @@ class OnionrUtils:
             else:
                 self.powSalt = retData
         return retData

     def strToBytes(self, data):
         try:
             data = data.encode()
@@ -648,7 +648,7 @@ class OnionrUtils:
         except AttributeError:
             pass
         return data

     def checkNetwork(self, torPort=0):
         '''Check if we are connected to the internet (through Tor)'''
         retData = False
@@ -656,7 +656,7 @@ class OnionrUtils:
         try:
             with open('static-data/connect-check.txt', 'r') as connectTest:
                 connectURLs = connectTest.read().split(',')

                 for url in connectURLs:
                     if self.doGetRequest(url, port=torPort) != False:
                         retData = True
@@ -689,4 +689,4 @@ def humanSize(num, suffix='B'):
         if abs(num) < 1024.0:
             return "%.1f %s%s" % (num, unit, suffix)
         num /= 1024.0
     return "%.1f %s%s" % (num, 'Yi', suffix)

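The 62-character check above works because a v3 .onion address is 56 base32 characters plus the '.onion' suffix, 62 characters in all; when tor.v3_onions is off, such addresses are skipped. As a self-contained illustration (accepts_address is not an Onionr function):

def accepts_address(address, v3_onions_enabled):
    '''Mirror the filter above: reject empty addresses, and reject v3 onions
    (56 base32 chars + ".onion" == 62 chars) when v3 support is disabled.'''
    if len(address) == 0:
        return False
    if not v3_onions_enabled and len(address) == 62:
        return False
    return True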
View File

@@ -1,6 +1,6 @@
 {
     "general" : {
-        "dev_mode": true,
+        "dev_mode" : true,
         "display_header" : true,

         "direct_connect" : {
@@ -11,11 +11,16 @@
     "www" : {
         "public" : {
-            "run" : true
+            "run" : true,
+            "path" : "static-data/www/public/",
+            "guess_mime" : true
         },

         "private" : {
-            "run" : true
+            "run" : true,
+            "path" : "static-data/www/private/",
+            "guess_mime" : true,
+            "timing_protection" : true
         },

         "ui" : {
@@ -28,41 +33,43 @@
     },
-    "log": {
-        "file": {
-            "output": true,
-            "path": "data/output.log"
+    "log" : {
+        "file" : {
+            "output" : true,
+            "path" : "data/output.log"
         },
-        "console": {
-            "output": true,
-            "color": true
+        "console" : {
+            "output" : true,
+            "color" : true
         }
     },

     "tor" : {
-        "v3onions": false
+        "v3onions" : false
     },

-    "i2p":{
-        "host": false,
-        "connect": true,
-        "ownAddr": ""
+    "i2p" : {
+        "host" : false,
+        "connect" : true,
+        "own_addr" : ""
     },

-    "allocations":{
-        "disk": 10000000000,
-        "netTotal": 1000000000,
-        "blockCache": 5000000,
-        "blockCacheTotal": 50000000
+    "allocations" : {
+        "disk" : 10000000000,
+        "net_total" : 1000000000,
+        "blockCache" : 5000000,
+        "blockCacheTotal" : 50000000
     },
-    "peers":{
-        "minimumScore": -100,
-        "maxStoredPeers": 5000,
-        "maxConnect": 10
+
+    "peers" : {
+        "minimum_score" : -100,
+        "max_stored_peers" : 5000,
+        "max_connect" : 10
     },
-    "timers":{
-        "lookupBlocks": 25,
-        "getBlocks": 30
+
+    "timers" : {
+        "lookup_blocks" : 25,
+        "get_blocks" : 30
     }
 }
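For reference, the renamed snake_case keys can be read back with nothing more than the json module; Onionr's config wrapper adds the dotted-key access and defaults used throughout the code changes above:

import json

defaults = json.loads('''{
    "peers" : {
        "minimum_score" : -100,
        "max_stored_peers" : 5000,
        "max_connect" : 10
    },
    "timers" : {
        "lookup_blocks" : 25,
        "get_blocks" : 30
    }
}''')

print(defaults['peers']['max_connect'])     # 10
print(defaults['timers']['lookup_blocks'])  # 25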