Improve console output and status message

parent 6b9f21fd08
commit adc85c76c4
@@ -50,7 +50,7 @@ class API:
 '''

 config.reload()

 if config.get('devmode', True):
 self._developmentMode = True
 logger.set_level(logger.LEVEL_DEBUG)
@@ -303,7 +303,7 @@ class OnionrCommunicate:

 events.event('outgoing_direct_connection', data = {'callback' : True, 'communicator' : self, 'data' : data, 'id' : identifier, 'token' : token, 'peer' : peer, 'callback' : callback, 'log' : log})

-logger.debug('Direct connection (identifier: "%s"): %s' + (identifier, data_str))
+logger.debug('Direct connection (identifier: "%s"): %s' % (identifier, data_str))
 try:
 self.performGet('directMessage', peer, data_str)
 except:
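A note on the fix above: the old debug call tried to concatenate a string with a tuple, which raises a TypeError at runtime, while %-formatting consumes the tuple as format arguments. A minimal illustration (the variable values here are made up for the example):

    identifier, data_str = 'abc123', 'hello'

    # Old form: str + tuple raises
    # TypeError: can only concatenate str (not "tuple") to str
    # 'Direct connection (identifier: "%s"): %s' + (identifier, data_str)

    # New form: the tuple supplies the %s placeholders
    msg = 'Direct connection (identifier: "%s"): %s' % (identifier, data_str)
    print(msg)  # Direct connection (identifier: "abc123"): hello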
@@ -416,26 +416,25 @@ class OnionrCommunicate:
 except IndexError:
 pass

-logger.info('Using ' + peerList[i] + ' to find new peers', timestamp=True)
+logger.info('Using %s to find new peers...' % peerList[i], timestamp=True)

 try:
 newAdders = self.performGet('pex', peerList[i], skipHighFailureAddress=True)
-logger.debug('Attempting to merge address: ')
-logger.debug(newAdders)
+if not newAdders is False: # keep the is False thing in there, it might not be bool
+logger.debug('Attempting to merge address: %s' % str(newAdders))
 self._utils.mergeAdders(newAdders)
 except requests.exceptions.ConnectionError:
-logger.info(peerList[i] + ' connection failed', timestamp=True)
+logger.info('%s connection failed' % peerList[i], timestamp=True)
 continue
 else:
 try:
-logger.info('Using ' + peerList[i] + ' to find new keys')
+logger.info('Using %s to find new keys...' % peerList[i])
 newKeys = self.performGet('kex', peerList[i], skipHighFailureAddress=True)
-logger.debug('Attempting to merge pubkey: ')
-logger.debug(newKeys)
+logger.debug('Attempting to merge pubkey: %s' % str(newKeys))
 # TODO: Require keys to come with POW token (very large amount of POW)
 self._utils.mergeKeys(newKeys)
 except requests.exceptions.ConnectionError:
-logger.info(peerList[i] + ' connection failed', timestamp=True)
+logger.info('%s connection failed' % peerList[i], timestamp=True)
 continue
 else:
 peersChecked += 1
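On the new guard in this hunk: performGet appears to return False when a request fails (see the requests exception handler further down in this file), so checking identity against False before merging keeps a bare boolean from being handed to mergeAdders. A rough sketch of the intent, with a hypothetical fetch function standing in for performGet:

    def fetch_pex(peer):
        """Hypothetical stand-in for performGet('pex', peer): returns a
        comma-separated address string on success, or False on failure."""
        return False  # pretend the request failed

    new_adders = fetch_pex('example-peer.onion')
    # Keep the explicit identity check, as the diff's own comment notes:
    # an empty-but-successful response should still be merged, only a hard failure skipped.
    if new_adders is not False:
        print('Attempting to merge address: %s' % str(new_adders))
    else:
        print('Peer exchange failed, nothing to merge')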
@@ -462,24 +461,24 @@ class OnionrCommunicate:
 lastDB = self._core.getAddressInfo(i, 'DBHash')

 if lastDB == None:
-logger.debug('Fetching hash from ' + str(i) + ', no previous known.')
+logger.debug('Fetching hash from %s, no previous known.' % str(i))
 else:
-logger.debug('Fetching hash from ' + str(i) + ', ' + str(lastDB) + ' last known')
+logger.debug('Fetching hash from %s, %s last known' % (str(i), str(lastDB)))

 currentDB = self.performGet('getDBHash', i)

 if currentDB != False:
-logger.debug(i + " hash db (from request): " + currentDB)
+logger.debug('%s hash db (from request): %s' % (str(i), str(currentDB)))
 else:
-logger.warn("Error getting hash db status for " + i)
+logger.warn('Failed to get hash db status for %s' % str(i))

 if currentDB != False:
 if lastDB != currentDB:
-logger.debug('Fetching hash from ' + i + ' - ' + currentDB + ' current hash.')
+logger.debug('Fetching hash from %s - %s current hash.' % (str(i), currentDB))
 try:
 blocks += self.performGet('getBlockHashes', i)
 except TypeError:
-logger.warn('Failed to get data hash from ' + i)
+logger.warn('Failed to get data hash from %s' % str(i))
 self.peerData[i]['failCount'] -= 1
 if self._utils.validateHash(currentDB):
 self._core.setAddressInfo(i, "DBHash", currentDB)
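The hunk above is part of a simple sync loop: ask each peer for its current database hash (getDBHash), compare it with the last hash recorded for that peer, and only request the full block-hash list (getBlockHashes) when the two differ. A hedged sketch of that comparison logic, with peer I/O faked by a dictionary instead of performGet:

    # Fake peer responses; in the real code these come from performGet().
    peer_db_hash = {'peer-a.onion': 'hash-2'}
    last_known = {'peer-a.onion': 'hash-1'}
    blocks = []

    for peer, current in peer_db_hash.items():
        previous = last_known.get(peer)
        if previous is None:
            print('Fetching hash from %s, no previous known.' % peer)
        else:
            print('Fetching hash from %s, %s last known' % (peer, previous))
        if current and current != previous:
            # Only now pull the (potentially large) block hash list.
            blocks += ['block-hash-1', 'block-hash-2']  # stand-in for performGet('getBlockHashes', peer)
            last_known[peer] = current                  # stand-in for setAddressInfo(peer, 'DBHash', current)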
@@ -501,11 +500,11 @@ class OnionrCommunicate:
 #logger.debug('Exchanged block (blockList): ' + i)
 if not self._utils.validateHash(i):
 # skip hash if it isn't valid
-logger.warn('Hash ' + i + ' is not valid')
+logger.warn('Hash %s is not valid' % str(i))
 continue
 else:
 self.newHashes[i] = 0
-logger.debug('Adding ' + i + ' to hash database...')
+logger.debug('Adding %s to hash database...' % str(i))
 self._core.addToBlockDB(i)
 self.lookupBlocksThreads -= 1
 return
@@ -532,14 +531,14 @@ class OnionrCommunicate:

 # check if a new hash has been around too long, delete it from database and add it to ignore list
 if self.newHashes[i] >= self.keepNewHash:
-logger.warn('Ignoring block ' + i + ' because it took to long to get valid data.')
+logger.warn('Ignoring block %s because it took to long to get valid data.' % str(i))
 del self.newHashes[i]
 self._core.removeBlock(i)
 self.ignoredHashes.append(i)
 continue

 self.newHashes[i] += 1
-logger.warn('UNSAVED BLOCK: ' + i)
+logger.warn('Block is unsaved: %s' % str(i))
 data = self.downloadBlock(i)

 # if block was successfull gotten (hash already verified)
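For context on the loop above: self.newHashes counts how many times each still-unsaved block hash has been retried, and once the count reaches keepNewHash the block is dropped from the database and added to an ignore list so it is never retried again. A compact sketch of that retry-then-ignore pattern (the threshold and names here are illustrative, not the project's actual values):

    keep_new_hash = 12          # illustrative threshold, like self.keepNewHash
    new_hashes = {'abc': 12}    # attempts seen so far, per block hash
    ignored_hashes = []

    for block_hash in list(new_hashes):
        if new_hashes[block_hash] >= keep_new_hash:
            print('Ignoring block %s because it took too long to get valid data.' % block_hash)
            del new_hashes[block_hash]         # stop counting it
            ignored_hashes.append(block_hash)  # and never retry it
            continue
        new_hashes[block_hash] += 1            # otherwise count this attempt and retry the download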
@@ -565,9 +564,9 @@ class OnionrCommunicate:
 blockContent = blockContent.decode()
 except AttributeError:
 pass

 if not self.verifyPow(blockContent, blockMeta2):
-logger.warn(i + " has invalid or insufficient proof of work token, deleting")
+logger.warn("%s has invalid or insufficient proof of work token, deleting..." % str(i))
 self._core.removeBlock(i)
 continue

@@ -586,13 +585,13 @@ class OnionrCommunicate:
 pass

 if self._core._crypto.edVerify(blockMetaData['meta'] + blockContent, creator, blockMetadata['sig'], encodedData=True):
-logger.info(i + ' was signed')
+logger.info('%s was signed' % str(i))
 self._core.updateBlockInfo(i, 'sig', 'true')
 else:
-logger.warn(i + ' has an invalid signature')
+logger.warn('%s has an invalid signature' % str(i))
 self._core.updateBlockInfo(i, 'sig', 'false')
 try:
-logger.info('Block type is ' + blockMeta2['type'])
+logger.info('Block type is %s' % str(blockMeta2['type']))
 self._core.updateBlockInfo(i, 'dataType', blockMeta2['type'])
 self.removeBlockFromProcessingList(i)
 self.removeBlockFromProcessingList(i)
@@ -652,7 +651,7 @@ class OnionrCommunicate:

 if digest == hash.strip():
 self._core.setData(data)
-logger.info('Successfully obtained data for ' + hash, timestamp=True)
+logger.info('Successfully obtained data for %s' % str(hash), timestamp=True)
 retVal = True
 break
 '''
@@ -662,11 +661,11 @@ class OnionrCommunicate:
 logger.debug('Block text:\n' + data.decode())
 '''
 else:
-logger.warn("Failed to validate " + hash + " " + " hash calculated was " + digest)
+logger.warn("Failed to validate %s -- hash calculated was %s" % (hash, digest))
 peerTryCount += 1

 return retVal

 def verifyPow(self, blockContent, metadata):
 '''
 Verifies the proof of work associated with a block
@@ -679,7 +678,7 @@ class OnionrCommunicate:
 except KeyError:
 return False
 dataLen = len(blockContent)

 expectedHash = self._crypto.blake2bHash(base64.b64decode(metadata['powToken']) + self._crypto.blake2bHash(blockContent.encode()))
 difficulty = 0
 try:
@@ -696,11 +695,9 @@ class OnionrCommunicate:
 logger.info('Validated block pow')
 retData = True
 else:
-logger.warn("Invalid token")
+logger.warn("Invalid token (#1)")
 else:
-logger.warn('expected hash ' + expectedHash)
-logger.warn('got hash ' + metadata['powHash'])
-logger.warn("Invalid token2")
+logger.warn('Invalid token (#2): Expected hash %s, got hash %s...' % (metadata['powHash'], expectedHash))

 return retData

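For orientation in the two verifyPow hunks above: the expected value is recomputed as blake2b(powToken + blake2b(blockContent)) and compared against the hash claimed in the block metadata, and the three separate mismatch warnings are collapsed into one %-formatted message. A simplified, hedged sketch of that recomputation using hashlib directly; the real code goes through self._crypto.blake2bHash, whose exact encoding, and the difficulty check that follows, are not shown in this diff:

    import base64
    import hashlib

    def check_pow_token(block_content: str, pow_token_b64: str, claimed_hash: str) -> bool:
        # Inner hash of the block content, then hash of token + inner hash,
        # mirroring the expectedHash line in the diff (hex encoding assumed).
        inner = hashlib.blake2b(block_content.encode()).hexdigest().encode()
        expected = hashlib.blake2b(base64.b64decode(pow_token_b64) + inner).hexdigest()
        if expected != claimed_hash:
            print('Invalid token: Expected hash %s, got hash %s...' % (claimed_hash, expected))
            return False
        return True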
@@ -722,7 +719,7 @@ class OnionrCommunicate:
 raise Exception("Could not perform self address check in performGet due to not knowing our address")
 if selfCheck:
 if peer.replace('/', '') == self._core.hsAdder:
-logger.warn('Tried to performget to own hidden service, but selfCheck was not set to false')
+logger.warn('Tried to performGet to own hidden service, but selfCheck was not set to false')
 return

 # Store peer in peerData dictionary (non permanent)
@@ -738,14 +735,14 @@ class OnionrCommunicate:
 try:
 if skipHighFailureAddress and self.peerData[peer]['failCount'] > self.highFailureAmount:
 retData = False
-logger.debug('Skipping ' + peer + ' because of high failure rate')
+logger.debug('Skipping %s because of high failure rate.' % peer)
 else:
 self.peerStatus[peer] = action
-logger.debug('Contacting ' + peer + ' on port ' + socksPort)
+logger.debug('Contacting %s on port %s' % (peer, str(socksPort)))
 r = requests.get(url, headers=headers, proxies=proxies, timeout=(15, 30))
 retData = r.text
 except requests.exceptions.RequestException as e:
-logger.warn(action + " failed with peer " + peer + ": " + str(e))
+logger.debug("%s failed with peer %s" % (action, peer))
 retData = False

 if not retData:
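The performGet hunk above also shows its failure handling: peers whose failCount exceeds highFailureAmount are skipped when skipHighFailureAddress is set, and an individual request failure is now logged at debug rather than warn level. A rough sketch of that skip logic; the counter bookkeeping and threshold value are assumptions based only on the names visible in this hunk:

    import logging

    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s %(message)s')
    logger = logging.getLogger('communicator-sketch')

    high_failure_amount = 7                       # stand-in for self.highFailureAmount
    peer_data = {'peer-a.onion': {'failCount': 9}}

    def perform_get(action, peer, skip_high_failure_address=False):
        if skip_high_failure_address and peer_data[peer]['failCount'] > high_failure_amount:
            logger.debug('Skipping %s because of high failure rate.' % peer)
            return False
        try:
            raise ConnectionError('tor circuit failed')  # pretend the HTTP request failed
        except ConnectionError:
            logger.debug('%s failed with peer %s' % (action, peer))
            peer_data[peer]['failCount'] += 1            # assumed bookkeeping, not shown in the hunk
            return False

    perform_get('pex', 'peer-a.onion', skip_high_failure_address=True)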
@@ -755,7 +752,7 @@ class OnionrCommunicate:
 self.peerData[peer]['failCount'] -= 1
 self.peerData[peer]['lastConnectTime'] = math.floor(time.time())
 return retData

 def peerStatusTaken(self, peer, status):
 '''
 Returns if a peer is currently performing a specified action
@@ -85,7 +85,7 @@ class Core:
 # This function simply adds a peer to the DB
 if not self._utils.validatePubKey(peerID):
 return False

 conn = sqlite3.connect(self.peerDB)
 hashID = self._crypto.pubKeyHashID(peerID)
 c = conn.cursor()
@@ -154,6 +154,8 @@ class Onionr:
 'config': self.configure,
 'start': self.start,
 'stop': self.killDaemon,
+'status': self.showStats,
+'statistics': self.showStats,
 'stats': self.showStats,

 'enable-plugin': self.enablePlugin,
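The new 'status' and 'statistics' entries simply alias the existing stats handler, so several command spellings dispatch to the same bound method. A minimal sketch of that dispatch-table pattern; the handler here is a placeholder, not the real showStats:

    class CLI:
        def show_stats(self):
            print('node statistics would be printed here')

        def run(self, command):
            commands = {
                'stats': self.show_stats,
                'status': self.show_stats,       # alias added by this commit
                'statistics': self.show_stats,   # alias added by this commit
            }
            handler = commands.get(command)
            if handler is None:
                print('unknown command: %s' % command)
            else:
                handler()

    CLI().run('status')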
@@ -559,6 +561,7 @@ class Onionr:
 # define stats messages here
 messages = {
 # info about local client
+'Onionr Daemon Status' : ((logger.colors.fg.green + 'Online') if self.onionrUtils.isCommunicatorRunning(timeout = 2) else logger.colors.fg.red + 'Offline'),
 'Public Key' : self.onionrCore._crypto.pubKey,
 'Address' : self.get_hostname(),

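The added 'Onionr Daemon Status' entry uses a conditional expression to pick a green 'Online' or red 'Offline' string depending on whether the communicator responds within a short timeout. A hedged sketch of that pattern, with plain ANSI codes standing in for logger.colors and a stubbed liveness check:

    GREEN, RED, RESET = '\033[32m', '\033[31m', '\033[0m'

    def is_communicator_running(timeout=2):
        # Stand-in for onionrUtils.isCommunicatorRunning(timeout=2),
        # which presumably pings the local daemon API.
        return False

    messages = {
        'Onionr Daemon Status': (GREEN + 'Online' + RESET) if is_communicator_running(timeout=2)
                                else (RED + 'Offline' + RESET),
    }

    for label, value in messages.items():
        print('%s: %s' % (label, value))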
@@ -33,7 +33,6 @@ class POW:
 blockCheck = 300000 # How often the hasher should check if the bitcoin block is updated (slows hashing but prevents less wasted work)
 blockCheckCount = 0
 block = '' #self.bitcoinNode.getBlockHash(self.bitcoinNode.getLastBlockHeight())
-#logger.debug('thread started')
 myCore = core.Core()
 while self.hashing:
 '''
@@ -125,15 +125,15 @@ class OnionrUtils:
 retVal = False
 if newAdderList != False:
 for adder in newAdderList.split(','):
-if not adder in self._core.listAdders(randomOrder=False) and adder.strip() != self.getMyAddress():
+if not adder in self._core.listAdders(randomOrder = False) and adder.strip() != self.getMyAddress():
 if self._core.addAddress(adder):
-logger.info('Added ' + adder + ' to db.', timestamp=True)
+logger.info('Added ' + adder + ' to db.', timestamp = True)
 retVal = True
 else:
-logger.debug(adder + " is either our address or already in our DB")
+logger.debug('%s is either our address or already in our DB' % adder)
 return retVal
 except Exception as error:
-logger.error('Failed to merge adders.', error=error)
+logger.error('Failed to merge adders.', error = error)
 return False

 def getMyAddress(self):
@@ -141,7 +141,7 @@ class OnionrUtils:
 with open('./data/hs/hostname', 'r') as hostname:
 return hostname.read().strip()
 except Exception as error:
-logger.error('Failed to read my address.', error=error)
+logger.error('Failed to read my address.', error = error)
 return None

 def localCommand(self, command, silent = True):