#!/usr/bin/env python3
'''
    Onionr - P2P Anonymous Storage Network

    This file contains both the OnionrCommunicate class for communcating with peers
    and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
'''
'''
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
'''
import sys, os, core, config, json, requests, time, logger, threading, base64, onionr, uuid, binascii
from dependencies import secrets
from utils import networkmerger
import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
from communicatorutils import onionrdaemontools
from communicatorutils import servicecreator
import onionrservices, onionr, onionrproofs
from communicatorutils import onionrcommunicatortimers, proxypicker

# Shorthand alias for the timer class used throughout this module
OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers

# Make sure the latest on-disk configuration is loaded before the daemon starts
config.reload()
class OnionrCommunicatorDaemon:
    '''Daemon that runs the main communicator loop: fires timers (block lookup,
    peer pool management, uploads, etc.) until shutdown is requested.'''
    def __init__(self, onionrInst, proxyPort, developmentMode=config.get('general.dev_mode', False)):
        '''Set up daemon state, register all timers, then run the main loop until shutdown.

        onionrInst: the owning Onionr instance (provides onionrCore)
        proxyPort: Tor socks proxy port used for outbound peer requests
        developmentMode: when True, adds a heartbeat debug timer
        '''
        # Link the Onionr instance back to this communicator so other components can reach it
        onionrInst.communicatorInst = self

        # configure logger and stuff
        onionr.Onionr.setupConfig('data/', self=self)

        # Tor socks proxy port used for all outbound peer requests.
        # (Original assigned this twice; the duplicate assignment was removed.)
        self.proxyPort = proxyPort

        self.isOnline = True # Assume we're connected to the internet

        # list of timer instances
        self.timers = []

        self._core = onionrInst.onionrCore

        # Block hashes queued for upload to other peers
        self.blocksToUpload = []

        # loop time.sleep delay in seconds
        self.delay = 1

        # lists of connected peers and peers we know we can't reach currently
        self.onlinePeers = []
        self.offlinePeers = []
        self.cooldownPeer = {}
        self.connectTimes = {}
        self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)
        self.newPeers = [] # Peers merged to us. Don't add to db until we know they're reachable

        # amount of threads running by name, used to prevent too many
        self.threadCounts = {}

        # set true when shutdown command received
        self.shutdown = False

        # dict of new blocks to download (hash -> list of peers), added to when new block lists are fetched from peers
        self.blockQueue = {}

        # list of blocks currently downloading, to avoid duplicate concurrent downloads
        self.currentDownloading = []

        # timestamp when the last online node was seen
        self.lastNodeSeen = None

        # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
        self.dbTimestamps = {}

        # Clear the daemon queue for any dead messages
        if os.path.exists(self._core.queueDB):
            self._core.clearDaemonQueue()

        # Loads in and starts the enabled plugins
        plugins.reload()

        # daemon tools are misc daemon functions, e.g. announce to online peers
        # intended only for use by OnionrCommunicatorDaemon
        self.daemonTools = onionrdaemontools.DaemonTools(self)

        # time app started running for info/statistics purposes
        self.startTime = self._core._utils.getEpoch()

        if developmentMode:
            OnionrCommunicatorTimers(self, self.heartbeat, 30)

        # Set timers, function reference, seconds
        # requiresPeer True means the timer function won't fire if we have no connected peers
        peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
        OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)
        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True, maxThreads=2)
        OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
        blockCleanupTimer = OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
        OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
        OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
        OnionrCommunicatorTimers(self, self.uploadBlock, 10, requiresPeer=True, maxThreads=1)
        OnionrCommunicatorTimers(self, self.daemonCommands, 6, maxThreads=1)
        OnionrCommunicatorTimers(self, self.detectAPICrash, 30, maxThreads=1)

        # Optional onionr direct-connection service servers
        if config.get('general.socket_servers', False):
            self.services = onionrservices.OnionrServices(self._core)
            self.active_services = []
            self.service_greenlets = []
            OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=(self,))
        else:
            self.services = None

        deniableBlockTimer = OnionrCommunicatorTimers(self, self.daemonTools.insertDeniableBlock, 180, requiresPeer=True, maxThreads=1)
        netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)

        # Only announce our node publicly at the lowest security level
        if config.get('general.security_level') == 0:
            announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 3600, requiresPeer=True, maxThreads=1)
            announceTimer.count = (announceTimer.frequency - 120)
        else:
            logger.debug('Will not announce node.')

        cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
        forwardSecrecyTimer = OnionrCommunicatorTimers(self, self.daemonTools.cleanKeys, 15, maxThreads=1)

        # set loop to execute instantly to load up peer pool (replaced old pool init wait)
        peerPoolTimer.count = (peerPoolTimer.frequency - 1)
        cleanupTimer.count = (cleanupTimer.frequency - 60)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)
        blockCleanupTimer.count = (blockCleanupTimer.frequency - 5)
        #forwardSecrecyTimer.count = (forwardSecrecyTimer.frequency - 990)

        # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
        try:
            while not self.shutdown:
                for i in self.timers:
                    if self.shutdown:
                        break
                    i.processTimer()
                time.sleep(self.delay)
                # Debug to print out used FDs (regular and net)
                #proc = psutil.Process()
                #print(proc.open_files(), len(psutil.net_connections()))
        except KeyboardInterrupt:
            self.shutdown = True

        logger.info('Goodbye.')

        # Stop any running service greenlets; the attribute only exists when
        # socket servers were enabled above, hence the AttributeError guard.
        try:
            self.service_greenlets
        except AttributeError:
            pass
        else:
            for server in self.service_greenlets:
                server.stop()

        self._core._utils.localCommand('shutdown') # shutdown the api
        time.sleep(0.5)
def lookupAdders ( self ) :
''' Lookup new peer addresses '''
2018-11-10 07:17:19 +00:00
logger . info ( ' Looking up new addresses... ' )
2018-06-23 07:36:22 +00:00
tryAmount = 1
2019-02-12 19:18:08 +00:00
newPeers = [ ]
2018-06-23 07:36:22 +00:00
for i in range ( tryAmount ) :
2018-07-02 04:04:14 +00:00
# Download new peer address list from random online peers
2019-02-12 19:18:08 +00:00
if len ( newPeers ) > 10000 :
# Dont get new peers if we have too many queued up
break
2018-06-23 07:36:22 +00:00
peer = self . pickOnlinePeer ( )
newAdders = self . peerAction ( peer , action = ' pex ' )
2019-02-12 19:18:08 +00:00
try :
newPeers = newAdders . split ( ' , ' )
except AttributeError :
pass
else :
# Validate new peers are good format and not already in queue
invalid = [ ]
for x in newPeers :
2019-02-21 20:25:45 +00:00
x = x . strip ( )
if not self . _core . _utils . validateID ( x ) or x in self . newPeers or x == self . _core . hsAddress :
2019-02-12 19:18:08 +00:00
invalid . append ( x )
for x in invalid :
newPeers . remove ( x )
self . newPeers . extend ( newPeers )
2018-07-19 22:32:21 +00:00
self . decrementThreadCount ( ' lookupAdders ' )
2018-07-02 04:04:14 +00:00
2018-06-15 05:45:07 +00:00
    def lookupBlocks(self):
        '''Lookup new blocks & add them to download queue'''
        logger.info('Looking up new blocks...')
        tryAmount = 2
        newBlocks = ''
        existingBlocks = self._core.getBlockList()
        triedPeers = [] # list of peers we've tried this time around
        maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
        lastLookupTime = 0 # Last time we looked up a particular peer's list
        for i in range(tryAmount):
            listLookupCommand = 'getblocklist' # This is defined here to reset it each time
            if len(self.blockQueue) >= maxBacklog:
                break
            if not self.isOnline:
                break
            # check if disk allocation is used
            if self._core._utils.storageCounter.isFull():
                logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
                break
            peer = self.pickOnlinePeer() # select random online peer
            # if we've already tried all the online peers this time around, stop
            if peer in triedPeers:
                if len(self.onlinePeers) == len(triedPeers):
                    break
                else:
                    continue
            triedPeers.append(peer)

            # Get the last time we looked up a peer's stamp to only fetch blocks since then.
            # Saved in memory only for privacy reasons
            try:
                lastLookupTime = self.dbTimestamps[peer]
            except KeyError:
                lastLookupTime = 0
            else:
                # Known peer: only request blocks newer than the last lookup
                listLookupCommand += '?date=%s' % (lastLookupTime,)
            try:
                newBlocks = self.peerAction(peer, listLookupCommand) # get list of new block hashes
            except Exception as error:
                logger.warn('Could not get new blocks from %s.' % peer, error=error)
                newBlocks = False
            else:
                # Record a rounded timestamp for this peer so the next lookup is incremental
                self.dbTimestamps[peer] = self._core._utils.getRoundedEpoch(roundS=60)
            if newBlocks != False:
                # if request was a success
                for i in newBlocks.split('\n'):
                    if self._core._utils.validateHash(i):
                        # if newline seperated string is valid hash
                        if not i in existingBlocks:
                            # if block does not exist on disk and is not already in block queue
                            if i not in self.blockQueue:
                                # New hash: only queue it if it meets difficulty and isn't blacklisted
                                if onionrproofs.hashMeetsDifficulty(i) and not self._core._blacklist.inBlacklist(i):
                                    if len(self.blockQueue) <= 1000000:
                                        self.blockQueue[i] = [peer] # add blocks to download queue
                            else:
                                # Already queued: remember this peer as another source (up to 10)
                                if peer not in self.blockQueue[i]:
                                    if len(self.blockQueue[i]) < 10:
                                        self.blockQueue[i].append(peer)
        self.decrementThreadCount('lookupBlocks')
        return
    def getBlocks(self):
        '''download new blocks in queue'''
        # Iterate over a snapshot of the queue keys; the dict is mutated during the loop
        for blockHash in list(self.blockQueue):
            triedQueuePeers = [] # List of peers we've tried for a block
            try:
                # Copy the peer list for this hash; KeyError if another thread removed it
                blockPeers = list(self.blockQueue[blockHash])
            except KeyError:
                blockPeers = []
            removeFromQueue = True
            if self.shutdown or not self.isOnline:
                # Exit loop if shutting down or offline
                break
            # Do not download blocks being downloaded or that are already saved (edge cases)
            if blockHash in self.currentDownloading:
                #logger.debug('Already downloading block %s...' % blockHash)
                continue
            if blockHash in self._core.getBlockList():
                #logger.debug('Block %s is already saved.' % (blockHash,))
                try:
                    del self.blockQueue[blockHash]
                except KeyError:
                    pass
                continue
            if self._core._blacklist.inBlacklist(blockHash):
                continue
            if self._core._utils.storageCounter.isFull():
                break
            self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
            # Prefer a peer known to have this block; otherwise pick any online peer
            if len(blockPeers) == 0:
                peerUsed = self.pickOnlinePeer()
            else:
                blockPeers = self._core._crypto.randomShuffle(blockPeers)
                peerUsed = blockPeers.pop(0)
            if not self.shutdown and peerUsed.strip() != '':
                logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
            content = self.peerAction(peerUsed, 'getdata/' + blockHash) # block content from random peer (includes metadata)
            if content != False and len(content) > 0:
                try:
                    content = content.encode()
                except AttributeError:
                    # already bytes
                    pass

                realHash = self._core._crypto.sha3Hash(content)
                try:
                    realHash = realHash.decode() # bytes on some versions for some reason
                except AttributeError:
                    pass
                if realHash == blockHash:
                    content = content.decode() # decode here because sha3Hash needs bytes above
                    metas = self._core._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
                    metadata = metas[0]
                    if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
                        if self._core._crypto.verifyPow(content): # check if POW is enough/correct
                            logger.info('Attempting to save block %s...' % blockHash[:12])
                            try:
                                self._core.setData(content)
                            except onionrexceptions.DiskAllocationReached:
                                logger.error('Reached disk allocation allowance, cannot save block %s.' % blockHash)
                                # Keep the hash queued so it can be retried when space frees up
                                removeFromQueue = False
                            else:
                                self._core.addToBlockDB(blockHash, dataSaved=True)
                                self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
                        else:
                            logger.warn('POW failed for block %s.' % blockHash)
                    else:
                        if self._core._blacklist.inBlacklist(realHash):
                            logger.warn('Block %s is blacklisted.' % (realHash,))
                        else:
                            logger.warn('Metadata for block %s is invalid.' % blockHash)
                            self._core._blacklist.addToDB(blockHash)
                else:
                    # if block didn't meet expected hash
                    tempHash = self._core._crypto.sha3Hash(content) # lazy hack, TODO use var
                    try:
                        tempHash = tempHash.decode()
                    except AttributeError:
                        pass
                    # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                    onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
                    if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                        # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                        logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
                    else:
                        removeFromQueue = False # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del self.blockQueue[blockHash] # remove from block queue both if success or false
                except KeyError:
                    pass
            self.currentDownloading.remove(blockHash)
        self.decrementThreadCount('getBlocks')
        return
def pickOnlinePeer ( self ) :
''' randomly picks peer from pool without bias (using secrets module) '''
retData = ' '
while True :
peerLength = len ( self . onlinePeers )
2018-06-16 20:54:56 +00:00
if peerLength < = 0 :
break
2018-06-15 05:45:07 +00:00
try :
# get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
retData = self . onlinePeers [ self . _core . _crypto . secrets . randbelow ( peerLength ) ]
except IndexError :
pass
else :
break
return retData
2018-06-13 22:22:48 +00:00
def decrementThreadCount ( self , threadName ) :
2018-06-15 05:45:07 +00:00
''' Decrement amount of a thread name if more than zero, called when a function meant to be run in a thread ends '''
2018-06-16 20:54:56 +00:00
try :
if self . threadCounts [ threadName ] > 0 :
self . threadCounts [ threadName ] - = 1
except KeyError :
pass
2018-07-06 04:27:12 +00:00
2018-06-21 07:24:58 +00:00
def clearOfflinePeer ( self ) :
''' Removes the longest offline peer to retry later '''
try :
2018-06-22 00:57:12 +00:00
removed = self . offlinePeers . pop ( 0 )
2018-06-21 07:24:58 +00:00
except IndexError :
pass
2018-06-22 00:57:12 +00:00
else :
2018-07-06 04:27:12 +00:00
logger . debug ( ' Removed ' + removed + ' from offline list, will try them again. ' )
2018-06-21 07:24:58 +00:00
self . decrementThreadCount ( ' clearOfflinePeer ' )
2018-06-13 22:22:48 +00:00
def getOnlinePeers ( self ) :
2018-11-11 03:25:40 +00:00
'''
Manages the self . onlinePeers attribute list , connects to more peers if we have none connected
'''
2018-07-02 04:04:14 +00:00
2018-11-11 03:25:40 +00:00
logger . debug ( ' Refreshing peer pool... ' )
2018-09-24 23:48:00 +00:00
maxPeers = int ( config . get ( ' peers.max_connect ' , 10 ) )
2018-06-13 22:22:48 +00:00
needed = maxPeers - len ( self . onlinePeers )
for i in range ( needed ) :
2018-07-03 08:18:07 +00:00
if len ( self . onlinePeers ) == 0 :
self . connectNewPeer ( useBootstrap = True )
2018-08-02 20:18:01 +00:00
else :
self . connectNewPeer ( )
2018-12-09 17:29:39 +00:00
2018-07-09 07:02:33 +00:00
if self . shutdown :
break
else :
if len ( self . onlinePeers ) == 0 :
2018-12-09 17:29:39 +00:00
logger . debug ( ' Couldn \' t connect to any peers. ' + ( ' Last node seen %s ago. ' % self . daemonTools . humanReadableTime ( time . time ( ) - self . lastNodeSeen ) if not self . lastNodeSeen is None else ' ' ) )
else :
self . lastNodeSeen = time . time ( )
2018-06-13 22:22:48 +00:00
self . decrementThreadCount ( ' getOnlinePeers ' )
2018-07-03 08:18:07 +00:00
def addBootstrapListToPeerList ( self , peerList ) :
2018-11-11 03:25:40 +00:00
'''
Add the bootstrap list to the peer list ( no duplicates )
'''
2018-07-01 21:01:19 +00:00
for i in self . _core . bootstrapList :
2018-11-10 07:22:27 +00:00
if i not in peerList and i not in self . offlinePeers and i != self . _core . hsAddress and len ( str ( i ) . strip ( ) ) > 0 :
2018-07-01 21:01:19 +00:00
peerList . append ( i )
2018-08-11 05:23:59 +00:00
self . _core . addAddress ( i )
2018-07-01 21:01:19 +00:00
2018-07-03 08:18:07 +00:00
    def connectNewPeer(self, peer='', useBootstrap=False):
        '''Adds a new random online peer to self.onlinePeers'''
        retData = False
        # NOTE: aliases the live offline list, so failed attempts below are
        # recorded directly into self.offlinePeers
        tried = self.offlinePeers
        if peer != '':
            if self._core._utils.validateID(peer):
                peerList = [peer]
            else:
                raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
        else:
            peerList = self._core.listAdders()

        mainPeerList = self._core.listAdders()
        # NOTE(review): this unconditionally overwrites peerList — including the
        # [peer] case built above — so an explicit peer argument only affects
        # validation, not candidate selection. Confirm this is intended.
        peerList = onionrpeers.getScoreSortedPeerList(self._core)

        # With a small candidate list (or randomly 1-in-4), also try the
        # not-yet-verified peers we learned via pex
        if len(peerList) < 8 or secrets.randbelow(4) == 3:
            tryingNew = []
            for x in self.newPeers:
                if x not in peerList:
                    peerList.append(x)
                    tryingNew.append(x)
            for i in tryingNew:
                self.newPeers.remove(i)

        if len(peerList) == 0 or useBootstrap:
            # Avoid duplicating bootstrap addresses in peerList
            self.addBootstrapListToPeerList(peerList)

        for address in peerList:
            # Skip v3 onions (62 chars) when v3 support is disabled in config
            if not config.get('tor.v3onions') and len(address) == 62:
                continue
            if address == self._core.hsAddress:
                continue
            if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
                continue
            if self.shutdown:
                return
            if self.peerAction(address, 'ping') == 'pong!':
                time.sleep(0.1)
                if address not in mainPeerList:
                    # Newly learned address that answered: merge it into the address db
                    networkmerger.mergeAdders(address, self._core)
                if address not in self.onlinePeers:
                    logger.info('Connected to ' + address)
                    self.onlinePeers.append(address)
                    self.connectTimes[address] = self._core._utils.getEpoch()
                retData = address

                # add peer to profile list if they're not in it
                for profile in self.peerProfiles:
                    if profile.address == address:
                        break
                else:
                    self.peerProfiles.append(onionrpeers.PeerProfiles(address, self._core))
                break
            else:
                tried.append(address)
                logger.debug('Failed to connect to ' + address)
        return retData
def removeOnlinePeer ( self , peer ) :
''' Remove an online peer '''
try :
del self . connectTimes [ peer ]
except KeyError :
pass
2019-01-16 05:57:47 +00:00
try :
del self . dbTimestamps [ peer ]
except KeyError :
pass
2018-08-31 22:53:48 +00:00
try :
self . onlinePeers . remove ( peer )
except ValueError :
pass
2018-08-02 07:28:26 +00:00
    def peerCleanup(self):
        '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
        onionrpeers.peerCleanup(self._core)
        self.decrementThreadCount('peerCleanup')
def printOnlinePeers ( self ) :
''' logs online peer list '''
if len ( self . onlinePeers ) == 0 :
logger . warn ( ' No online peers ' )
2018-07-03 21:24:14 +00:00
else :
logger . info ( ' Online peers: ' )
for i in self . onlinePeers :
2018-07-30 22:48:29 +00:00
score = str ( self . getPeerProfileInstance ( i ) . score )
logger . info ( i + ' , score: ' + score )
2018-06-15 05:45:07 +00:00
2019-03-11 05:10:37 +00:00
    def peerAction(self, peer, action, data='', returnHeaders=False):
        '''Perform a get request to a peer'''
        if len(peer) == 0:
            return False
        #logger.debug('Performing ' + action + ' with ' + peer + ' on port ' + str(self.proxyPort))
        url = 'http://%s/%s' % (peer, action)
        if len(data) > 0:
            # NOTE(review): '&' separator assumes the action already carries a
            # query string (e.g. '?date=...'); confirm against the peer API
            url += '&data=' + data
        self._core.setAddressInfo(peer, 'lastConnectAttempt', self._core._utils.getEpoch()) # mark the time we're trying to request this peer
        # NOTE(review): returnHeaders is not forwarded to doGetRequest here — verify
        retData = self._core._utils.doGetRequest(url, port=self.proxyPort)
        # if request failed, (error), mark peer offline
        if retData == False:
            try:
                self.getPeerProfileInstance(peer).addScore(-10)
                self.removeOnlinePeer(peer)
                if action != 'ping':
                    self.getOnlinePeers() # Will only add a new peer to pool if needed
            except ValueError:
                pass
        else:
            self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch())
            self.getPeerProfileInstance(peer).addScore(1)
        return retData # If returnHeaders, returns tuple of data, headers. if not, just data string
def getPeerProfileInstance ( self , peer ) :
''' Gets a peer profile instance from the list of profiles, by address name '''
for i in self . peerProfiles :
# if the peer's profile is already loaded, return that
if i . address == peer :
retData = i
break
else :
# if the peer's profile is not loaded, return a new one. connectNewPeer adds it the list on connect
retData = onionrpeers . PeerProfiles ( peer , self . _core )
2018-06-15 05:45:07 +00:00
return retData
2019-01-20 22:54:04 +00:00
def getUptime ( self ) :
return self . _core . _utils . getEpoch ( ) - self . startTime
2018-06-12 23:32:33 +00:00
def heartbeat ( self ) :
2018-06-13 03:43:39 +00:00
''' Show a heartbeat debug message '''
2019-01-20 22:54:04 +00:00
logger . debug ( ' Heartbeat. Node running for %s . ' % self . daemonTools . humanReadableTime ( self . getUptime ( ) ) )
2018-06-13 22:22:48 +00:00
self . decrementThreadCount ( ' heartbeat ' )
2018-06-13 07:33:37 +00:00
2018-06-13 03:43:39 +00:00
    def daemonCommands(self):
        '''
            Process daemon commands from daemonQueue
        '''
        cmd = self._core.daemonQueue()
        response = ''
        if cmd is not False:
            events.event('daemon_command', onionr=None, data={'cmd' : cmd})
            if cmd[0] == 'shutdown':
                self.shutdown = True
            elif cmd[0] == 'announceNode':
                if len(self.onlinePeers) > 0:
                    self.announce(cmd[1])
                else:
                    logger.debug("No nodes connected. Will not introduce node.")
            elif cmd[0] == 'runCheck': # deprecated
                logger.debug('Status check; looks good.')
                # Touch the runcheck marker file to signal liveness
                open(self._core.dataDir + '.runcheck', 'w+').close()
            elif cmd[0] == 'connectedPeers':
                response = '\n'.join(list(self.onlinePeers)).strip()
                if response == '':
                    response = 'none'
            elif cmd[0] == 'localCommand':
                # Proxy an arbitrary local API command and capture its output
                response = self._core._utils.localCommand(cmd[1])
            elif cmd[0] == 'pex':
                # Force the lookupAdders timer to fire on its next tick
                for i in self.timers:
                    if i.timerFunction.__name__ == 'lookupAdders':
                        i.count = (i.frequency - 1)
            elif cmd[0] == 'uploadBlock':
                self.blocksToUpload.append(cmd[1])

            if cmd[0] not in ('', None):
                if response != '':
                    # Post any produced response back to the API response queue,
                    # keyed by the command's response id (cmd[4])
                    self._core._utils.localCommand('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
            response = ''

        self.decrementThreadCount('daemonCommands')
    def uploadBlock(self):
        '''Upload our block to a few peers'''
        # when inserting a block, we try to upload it to a few peers to add some deniability
        triedPeers = []
        finishedUploads = []
        # Shuffle so we don't always upload in the same order
        self.blocksToUpload = self._core._crypto.randomShuffle(self.blocksToUpload)
        if len(self.blocksToUpload) != 0:
            for bl in self.blocksToUpload:
                if not self._core._utils.validateHash(bl):
                    # Abort the whole pass on a malformed hash
                    logger.warn('Requested to upload invalid block')
                    self.decrementThreadCount('uploadBlock')
                    return
                # Try up to 6 distinct online peers per block
                for i in range(min(len(self.onlinePeers), 6)):
                    peer = self.pickOnlinePeer()
                    if peer in triedPeers:
                        continue
                    triedPeers.append(peer)
                    url = 'http://' + peer + '/upload'
                    data = {'block': block.Block(bl).getRaw()}
                    proxyType = proxypicker.pick_proxy(peer)
                    logger.info("Uploading block to " + peer)
                    if not self._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
                        # Upload accepted: tell the API to hold the block for sharing
                        self._core._utils.localCommand('waitforshare/' + bl, post=True)
                        finishedUploads.append(bl)
        for x in finishedUploads:
            try:
                self.blocksToUpload.remove(x)
            except ValueError:
                pass
        self.decrementThreadCount('uploadBlock')
    def announce(self, peer):
        '''Announce to peers our address'''
        # NOTE(review): the peer argument is not used here; announcement is fully
        # delegated to daemonTools.announceNode() — confirm intended.
        if self.daemonTools.announceNode() == False:
            logger.warn('Could not introduce node.')
def detectAPICrash ( self ) :
''' exit if the api server crashes/stops '''
2018-12-22 19:02:09 +00:00
if self . _core . _utils . localCommand ( ' ping ' , silent = False ) not in ( ' pong ' , ' pong! ' ) :
2019-04-09 17:35:40 +00:00
for i in range ( 12 ) :
2019-02-14 23:48:41 +00:00
if self . _core . _utils . localCommand ( ' ping ' ) in ( ' pong ' , ' pong! ' ) or self . shutdown :
2018-06-13 03:43:39 +00:00
break # break for loop
time . sleep ( 1 )
else :
# This executes if the api is NOT detected to be running
2018-07-31 04:41:32 +00:00
events . event ( ' daemon_crash ' , onionr = None , data = { } )
2018-06-13 03:43:39 +00:00
logger . error ( ' Daemon detected API crash (or otherwise unable to reach API after long time), stopping... ' )
self . shutdown = True
2018-06-13 22:22:48 +00:00
self . decrementThreadCount ( ' detectAPICrash ' )
2018-06-13 07:33:37 +00:00
2018-11-10 07:17:19 +00:00
def runCheck ( self ) :
if self . daemonTools . runCheck ( ) :
logger . debug ( ' Status check; looks good. ' )
self . decrementThreadCount ( ' runCheck ' )
2019-01-20 18:09:53 +00:00
def startCommunicator(onionrInst, proxyPort):
    '''Entry point: constructing the daemon runs its main loop until shutdown.'''
    OnionrCommunicatorDaemon(onionrInst, proxyPort)