2018-06-10 08:00:01 +00:00
#!/usr/bin/env python3
'''
2018-08-27 03:44:32 +00:00
Onionr - P2P Anonymous Storage Network
2018-06-10 08:00:01 +00:00
This file contains both the OnionrCommunicate class for communicating with peers
and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
'''
'''
This program is free software : you can redistribute it and / or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with this program . If not , see < https : / / www . gnu . org / licenses / > .
'''
2018-09-23 01:21:39 +00:00
import sys , os , core , config , json , requests , time , logger , threading , base64 , onionr , uuid
2018-07-31 04:41:32 +00:00
import onionrexceptions , onionrpeers , onionrevents as events , onionrplugins as plugins , onionrblockapi as block
2019-01-16 05:57:47 +00:00
import onionrdaemontools , onionrsockets , onionr , onionrproofs , proofofmemory
2018-12-22 19:02:09 +00:00
import binascii
2018-09-20 17:04:58 +00:00
from dependencies import secrets
2018-06-12 07:34:33 +00:00
from defusedxml import minidom
2018-06-11 07:40:45 +00:00
class OnionrCommunicatorDaemon:
    '''Daemon that communicates with peers and processes commands from the daemon queue.

    Constructing this class blocks: __init__ sets up state, registers timers,
    then runs the main timer loop until self.shutdown becomes True.
    '''
    def __init__(self, debug, developmentMode):
        # configure logger and stuff
        onionr.Onionr.setupConfig('data/', self=self)

        self.isOnline = True # Assume we're connected to the internet

        # list of timer instances
        self.timers = []

        # initalize core with Tor socks port being 3rd argument
        self.proxyPort = sys.argv[2]
        self._core = core.Core(torPort=self.proxyPort)

        # intalize NIST beacon salt and time
        self.nistSaltTimestamp = 0
        self.powSalt = 0

        # blocks we have been asked to upload to peers (fed by the 'uploadBlock' daemon command)
        self.blocksToUpload = []

        # loop time.sleep delay in seconds
        self.delay = 1

        # time app started running for info/statistics purposes
        self.startTime = self._core._utils.getEpoch()

        # lists of connected peers and peers we know we can't reach currently
        self.onlinePeers = []
        self.offlinePeers = []
        self.cooldownPeer = {}
        self.connectTimes = {}
        self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)

        # amount of threads running by name, used to prevent too many
        self.threadCounts = {}

        # set true when shutdown command recieved
        self.shutdown = False

        # list of new blocks to download, added to when new block lists are fetched from peers
        self.blockQueue = []

        # list of blocks currently downloading, to avoid duplicate concurrent downloads
        self.currentDownloading = []

        # timestamp when the last online node was seen
        self.lastNodeSeen = None

        # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
        self.dbTimestamps = {}

        # Clear the daemon queue for any dead messages
        if os.path.exists(self._core.queueDB):
            self._core.clearDaemonQueue()

        # Loads in and starts the enabled plugins
        plugins.reload()

        self.proofofmemory = proofofmemory.ProofOfMemory(self)

        # daemon tools are misc daemon functions, e.g. announce to online peers
        # intended only for use by OnionrCommunicatorDaemon
        self.daemonTools = onionrdaemontools.DaemonTools(self)

        if debug or developmentMode:
            OnionrCommunicatorTimers(self, self.heartbeat, 30)

        # Set timers, function reference, seconds
        # requiresPeer True means the timer function won't fire if we have no connected peers
        peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
        OnionrCommunicatorTimers(self, self.runCheck, 1)
        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True, maxThreads=2)
        OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
        OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
        OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
        OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
        OnionrCommunicatorTimers(self, self.uploadBlock, 10, requiresPeer=True, maxThreads=1)
        OnionrCommunicatorTimers(self, self.daemonCommands, 6, maxThreads=1)
        OnionrCommunicatorTimers(self, self.detectAPICrash, 30, maxThreads=1)
        deniableBlockTimer = OnionrCommunicatorTimers(self, self.daemonTools.insertDeniableBlock, 180, requiresPeer=True, maxThreads=1)
        netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)

        # only announce ourselves at the lowest (most permissive) security level
        if config.get('general.security_level') == 0:
            announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 3600, requiresPeer=True, maxThreads=1)
            announceTimer.count = (announceTimer.frequency - 120)
        else:
            logger.debug('Will not announce node.')

        cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
        forwardSecrecyTimer = OnionrCommunicatorTimers(self, self.daemonTools.cleanKeys, 15)

        # set loop to execute instantly to load up peer pool (replaced old pool init wait)
        peerPoolTimer.count = (peerPoolTimer.frequency - 1)
        cleanupTimer.count = (cleanupTimer.frequency - 60)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)
        #forwardSecrecyTimer.count = (forwardSecrecyTimer.frequency - 990)

        if config.get('general.socket_servers'):
            self.socketServer = threading.Thread(target=onionrsockets.OnionrSocketServer, args=(self._core,))
            self.socketServer.start()
            self.socketClient = onionrsockets.OnionrSocketClient(self._core)

            # Loads chat messages into memory
            # NOTE(review): self._chat is never assigned anywhere in this file; this
            # thread will raise AttributeError unless _chat is injected externally — confirm.
            threading.Thread(target=self._chat.chatHandler).start()

        # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
        try:
            while not self.shutdown:
                for i in self.timers:
                    if self.shutdown:
                        break
                    i.processTimer()
                time.sleep(self.delay)
        except KeyboardInterrupt:
            self.shutdown = True
            pass

        logger.info('Goodbye.')
        self._core.killSockets = True
        self._core._utils.localCommand('shutdown') # shutdown the api
        time.sleep(0.5)
2018-07-23 07:43:10 +00:00
2018-06-23 07:36:22 +00:00
def lookupAdders ( self ) :
''' Lookup new peer addresses '''
2018-11-10 07:17:19 +00:00
logger . info ( ' Looking up new addresses... ' )
2018-06-23 07:36:22 +00:00
tryAmount = 1
for i in range ( tryAmount ) :
2018-07-02 04:04:14 +00:00
# Download new peer address list from random online peers
2018-06-23 07:36:22 +00:00
peer = self . pickOnlinePeer ( )
newAdders = self . peerAction ( peer , action = ' pex ' )
self . _core . _utils . mergeAdders ( newAdders )
2018-07-19 22:32:21 +00:00
self . decrementThreadCount ( ' lookupAdders ' )
2018-07-02 04:04:14 +00:00
2018-06-15 05:45:07 +00:00
    def lookupBlocks(self):
        '''Lookup new blocks & add them to download queue.

        Asks up to tryAmount random online peers for their block lists (optionally
        filtered by the last time we queried that peer) and appends unseen, valid,
        difficulty-passing hashes to self.blockQueue. No return value of interest.
        '''
        logger.info('Looking up new blocks...')
        tryAmount = 2
        newBlocks = ''
        existingBlocks = self._core.getBlockList()
        triedPeers = [] # list of peers we've tried this time around
        maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
        lastLookupTime = 0 # Last time we looked up a particular peer's list
        for i in range(tryAmount):
            listLookupCommand = 'getblocklist' # This is defined here to reset it each time
            if len(self.blockQueue) >= maxBacklog:
                break
            if not self.isOnline:
                break
            # check if disk allocation is used
            if self._core._utils.storageCounter.isFull():
                logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
                break
            peer = self.pickOnlinePeer() # select random online peer
            # if we've already tried all the online peers this time around, stop
            if peer in triedPeers:
                if len(self.onlinePeers) == len(triedPeers):
                    break
                else:
                    continue
            newDBHash = self.peerAction(peer, 'getdbhash') # get their db hash
            if newDBHash == False or not self._core._utils.validateHash(newDBHash):
                continue # if request failed, restart loop (peer is added to offline peers automatically)
            triedPeers.append(peer)
            # only fetch the list when the peer's DB hash changed since we last saw it
            if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
                self._core.setAddressInfo(peer, 'DBHash', newDBHash)
                # Get the last time we looked up a peer's stamp to only fetch blocks since then.
                # Saved in memory only for privacy reasons
                try:
                    lastLookupTime = self.dbTimestamps[peer]
                except KeyError:
                    lastLookupTime = 0
                else:
                    listLookupCommand += '?date=%s' % (lastLookupTime,)
                try:
                    newBlocks = self.peerAction(peer, listLookupCommand) # get list of new block hashes
                except Exception as error:
                    logger.warn('Could not get new blocks from %s.' % peer, error=error)
                    newBlocks = False
                else:
                    # remember when we queried this peer (rounded for privacy)
                    self.dbTimestamps[peer] = self._core._utils.getRoundedEpoch(roundS=60)
                if newBlocks != False:
                    # if request was a success
                    for i in newBlocks.split('\n'):
                        if self._core._utils.validateHash(i):
                            # if newline seperated string is valid hash
                            if not i in existingBlocks:
                                # if block does not exist on disk and is not already in block queue
                                if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i):
                                    if onionrproofs.hashMeetsDifficulty(i):
                                        self.blockQueue.append(i) # add blocks to download queue
        self.decrementThreadCount('lookupBlocks')
        return
    def getBlocks(self):
        '''Download new blocks in queue.

        Pops hashes from self.blockQueue one at a time, fetches each block from a
        random online peer, verifies hash / metadata / PoW, and saves valid blocks.
        Invalid responses lower the serving peer's score.

        NOTE(review): this iterates self.blockQueue while removing from it, which
        can skip elements; confirm whether that is acceptable here (skipped hashes
        are retried on the next timer fire).
        '''
        for blockHash in self.blockQueue:
            removeFromQueue = True
            if self.shutdown or not self.isOnline:
                # Exit loop if shutting down or offline
                break
            # Do not download blocks being downloaded or that are already saved (edge cases)
            if blockHash in self.currentDownloading:
                logger.debug('Already downloading block %s...' % blockHash)
                continue
            if blockHash in self._core.getBlockList():
                logger.debug('Block %s is already saved.' % (blockHash,))
                self.blockQueue.remove(blockHash)
                continue
            if self._core._blacklist.inBlacklist(blockHash):
                continue
            if self._core._utils.storageCounter.isFull():
                break
            self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
            peerUsed = self.pickOnlinePeer()
            if not self.shutdown and peerUsed.strip() != '':
                logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
            content = self.peerAction(peerUsed, 'getdata/' + blockHash) # block content from random peer (includes metadata)
            if content != False and len(content) > 0:
                try:
                    content = content.encode()
                except AttributeError:
                    pass
                try:
                    content = base64.b64decode(content) # content is base64 encoded in transport
                except binascii.Error:
                    pass
                realHash = self._core._crypto.sha3Hash(content)
                try:
                    realHash = realHash.decode() # bytes on some versions for some reason
                except AttributeError:
                    pass
                if realHash == blockHash:
                    content = content.decode() # decode here because sha3Hash needs bytes above
                    metas = self._core._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
                    metadata = metas[0]
                    if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
                        if self._core._crypto.verifyPow(content): # check if POW is enough/correct
                            logger.info('Attempting to save block %s...' % blockHash[:12])
                            try:
                                self._core.setData(content)
                            except onionrexceptions.DiskAllocationReached:
                                logger.error('Reached disk allocation allowance, cannot save block %s.' % blockHash)
                                # keep the hash queued so we can retry once space frees up
                                removeFromQueue = False
                            else:
                                self._core.addToBlockDB(blockHash, dataSaved=True)
                                self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
                        else:
                            logger.warn('POW failed for block %s.' % blockHash)
                    else:
                        if self._core._blacklist.inBlacklist(realHash):
                            logger.warn('Block %s is blacklisted.' % (realHash,))
                        else:
                            logger.warn('Metadata for block %s is invalid.' % blockHash)
                            self._core._blacklist.addToDB(blockHash)
                else:
                    # if block didn't meet expected hash
                    tempHash = self._core._crypto.sha3Hash(content) # lazy hack, TODO use var
                    try:
                        tempHash = tempHash.decode()
                    except AttributeError:
                        pass
                    # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                    onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
                    if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                        # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                        logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
            if removeFromQueue:
                try:
                    self.blockQueue.remove(blockHash) # remove from block queue both if success or false
                except ValueError:
                    pass
            self.currentDownloading.remove(blockHash)
        self.decrementThreadCount('getBlocks')
        return
def pickOnlinePeer ( self ) :
''' randomly picks peer from pool without bias (using secrets module) '''
retData = ' '
while True :
peerLength = len ( self . onlinePeers )
2018-06-16 20:54:56 +00:00
if peerLength < = 0 :
break
2018-06-15 05:45:07 +00:00
try :
# get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
retData = self . onlinePeers [ self . _core . _crypto . secrets . randbelow ( peerLength ) ]
except IndexError :
pass
else :
break
return retData
2018-06-13 22:22:48 +00:00
def decrementThreadCount ( self , threadName ) :
2018-06-15 05:45:07 +00:00
''' Decrement amount of a thread name if more than zero, called when a function meant to be run in a thread ends '''
2018-06-16 20:54:56 +00:00
try :
if self . threadCounts [ threadName ] > 0 :
self . threadCounts [ threadName ] - = 1
except KeyError :
pass
2018-07-06 04:27:12 +00:00
2018-06-21 07:24:58 +00:00
def clearOfflinePeer ( self ) :
''' Removes the longest offline peer to retry later '''
try :
2018-06-22 00:57:12 +00:00
removed = self . offlinePeers . pop ( 0 )
2018-06-21 07:24:58 +00:00
except IndexError :
pass
2018-06-22 00:57:12 +00:00
else :
2018-07-06 04:27:12 +00:00
logger . debug ( ' Removed ' + removed + ' from offline list, will try them again. ' )
2018-06-21 07:24:58 +00:00
self . decrementThreadCount ( ' clearOfflinePeer ' )
2018-06-13 22:22:48 +00:00
    def getOnlinePeers(self):
        '''
        Manages the self.onlinePeers attribute list, connects to more peers if we have none connected
        '''
        logger.debug('Refreshing peer pool...')
        maxPeers = int(config.get('peers.max_connect', 10))
        # how many more peers we want in the pool
        needed = maxPeers - len(self.onlinePeers)
        for i in range(needed):
            if len(self.onlinePeers) == 0:
                # empty pool: fall back to the bootstrap list as well
                self.connectNewPeer(useBootstrap=True)
            else:
                self.connectNewPeer()

            if self.shutdown:
                break
        else:
            # loop completed without a shutdown; record connectivity outcome
            if len(self.onlinePeers) == 0:
                logger.debug('Couldn\'t connect to any peers.' + (' Last node seen %s ago.' % self.daemonTools.humanReadableTime(time.time() - self.lastNodeSeen) if not self.lastNodeSeen is None else ''))
            else:
                self.lastNodeSeen = time.time()
        self.decrementThreadCount('getOnlinePeers')
2018-07-03 08:18:07 +00:00
def addBootstrapListToPeerList ( self , peerList ) :
2018-11-11 03:25:40 +00:00
'''
Add the bootstrap list to the peer list ( no duplicates )
'''
2018-07-01 21:01:19 +00:00
for i in self . _core . bootstrapList :
2018-11-10 07:22:27 +00:00
if i not in peerList and i not in self . offlinePeers and i != self . _core . hsAddress and len ( str ( i ) . strip ( ) ) > 0 :
2018-07-01 21:01:19 +00:00
peerList . append ( i )
2018-08-11 05:23:59 +00:00
self . _core . addAddress ( i )
2018-07-01 21:01:19 +00:00
2018-07-03 08:18:07 +00:00
    def connectNewPeer(self, peer='', useBootstrap=False):
        '''Adds a new random online peer to self.onlinePeers.

        peer: optional specific address to test (must validate or InvalidAddress is raised).
        useBootstrap: also mix in the bootstrap list when the candidate list is empty.
        Returns the connected address, or False if none connected.
        '''
        retData = False
        # alias: failures below are appended here and thus land in self.offlinePeers
        tried = self.offlinePeers
        if peer != '':
            if self._core._utils.validateID(peer):
                peerList = [peer]
            else:
                raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
        else:
            peerList = self._core.listAdders()

        # NOTE(review): this unconditionally replaces peerList, discarding the
        # explicit `peer` argument handled above — confirm whether the score-sorted
        # list should only be used when no specific peer was requested.
        peerList = onionrpeers.getScoreSortedPeerList(self._core)

        if len(peerList) == 0 or useBootstrap:
            # Avoid duplicating bootstrap addresses in peerList
            self.addBootstrapListToPeerList(peerList)

        for address in peerList:
            # skip v3 onions (62-char addresses) unless enabled in config
            if not config.get('tor.v3onions') and len(address) == 62:
                continue
            if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
                continue
            if self.shutdown:
                return
            if self.peerAction(address, 'ping') == 'pong!':
                logger.info('Connected to ' + address)
                time.sleep(0.1)
                if address not in self.onlinePeers:
                    self.onlinePeers.append(address)
                    self.connectTimes[address] = self._core._utils.getEpoch()
                retData = address

                # add peer to profile list if they're not in it
                for profile in self.peerProfiles:
                    if profile.address == address:
                        break
                else:
                    self.peerProfiles.append(onionrpeers.PeerProfiles(address, self._core))
                break
            else:
                tried.append(address)
                logger.debug('Failed to connect to ' + address)
        return retData
2018-07-06 04:27:12 +00:00
2018-08-31 22:53:48 +00:00
def removeOnlinePeer ( self , peer ) :
''' Remove an online peer '''
try :
del self . connectTimes [ peer ]
except KeyError :
pass
2019-01-16 05:57:47 +00:00
try :
del self . dbTimestamps [ peer ]
except KeyError :
pass
2018-08-31 22:53:48 +00:00
try :
self . onlinePeers . remove ( peer )
except ValueError :
pass
2018-08-02 07:28:26 +00:00
def peerCleanup ( self ) :
''' This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow) '''
onionrpeers . peerCleanup ( self . _core )
2018-08-03 06:28:26 +00:00
self . decrementThreadCount ( ' peerCleanup ' )
2018-08-02 07:28:26 +00:00
2018-06-15 05:45:07 +00:00
def printOnlinePeers ( self ) :
''' logs online peer list '''
if len ( self . onlinePeers ) == 0 :
logger . warn ( ' No online peers ' )
2018-07-03 21:24:14 +00:00
else :
logger . info ( ' Online peers: ' )
for i in self . onlinePeers :
2018-07-30 22:48:29 +00:00
score = str ( self . getPeerProfileInstance ( i ) . score )
logger . info ( i + ' , score: ' + score )
2018-06-15 05:45:07 +00:00
    def peerAction(self, peer, action, data=''):
        '''Perform a get request to a peer.

        peer: peer address (empty string short-circuits to False).
        action: API path segment to request.
        data: optional payload appended to the URL.
        Returns the response body, or False on failure (peer is then penalized
        and removed from the online pool).
        '''
        if len(peer) == 0:
            return False
        #logger.debug('Performing ' + action + ' with ' + peer + ' on port ' + str(self.proxyPort))
        url = 'http://%s/%s' % (peer, action)
        if len(data) > 0:
            # NOTE(review): '&data=' is appended without a preceding '?' — confirm
            # the peer API expects this exact form.
            url += '&data=' + data
        self._core.setAddressInfo(peer, 'lastConnectAttempt', self._core._utils.getEpoch()) # mark the time we're trying to request this peer
        retData = self._core._utils.doGetRequest(url, port=self.proxyPort)
        # if request failed, (error), mark peer offline
        if retData == False:
            try:
                self.getPeerProfileInstance(peer).addScore(-10)
                self.removeOnlinePeer(peer)
                if action != 'ping':
                    self.getOnlinePeers() # Will only add a new peer to pool if needed
            except ValueError:
                pass
        else:
            # success: record the contact time and reward the peer
            self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch())
            self.getPeerProfileInstance(peer).addScore(1)
        return retData
2018-09-24 23:48:00 +00:00
2018-07-30 22:48:29 +00:00
def getPeerProfileInstance ( self , peer ) :
''' Gets a peer profile instance from the list of profiles, by address name '''
for i in self . peerProfiles :
# if the peer's profile is already loaded, return that
if i . address == peer :
retData = i
break
else :
# if the peer's profile is not loaded, return a new one. connectNewPeer adds it the list on connect
retData = onionrpeers . PeerProfiles ( peer , self . _core )
2018-06-15 05:45:07 +00:00
return retData
2018-06-12 23:32:33 +00:00
def heartbeat ( self ) :
2018-06-13 03:43:39 +00:00
''' Show a heartbeat debug message '''
2018-06-26 05:26:01 +00:00
currentTime = self . _core . _utils . getEpoch ( ) - self . startTime
2018-12-09 17:29:39 +00:00
logger . debug ( ' Heartbeat. Node running for %s . ' % self . daemonTools . humanReadableTime ( currentTime ) )
2018-06-13 22:22:48 +00:00
self . decrementThreadCount ( ' heartbeat ' )
2018-06-13 07:33:37 +00:00
2018-06-13 03:43:39 +00:00
    def daemonCommands(self):
        '''
        Process daemon commands from daemonQueue.

        Pops at most one command per call; cmd[0] is the command name and the
        remaining elements are its arguments. Commands that produce output post
        it back via the queueResponseAdd local API.
        '''
        cmd = self._core.daemonQueue()
        response = ''
        if cmd is not False:
            events.event('daemon_command', onionr=None, data={'cmd': cmd})
            if cmd[0] == 'shutdown':
                self.shutdown = True
            elif cmd[0] == 'announceNode':
                if len(self.onlinePeers) > 0:
                    self.announce(cmd[1])
                else:
                    logger.debug("No nodes connected. Will not introduce node.")
            elif cmd[0] == 'runCheck': # deprecated
                logger.debug('Status check; looks good.')
                open(self._core.dataDir + '.runcheck', 'w+').close()
            elif cmd[0] == 'connectedPeers':
                response = '\n'.join(list(self.onlinePeers)).strip()
                if response == '':
                    response = 'none'
            elif cmd[0] == 'pex':
                # force the lookupAdders timer to fire on the next tick
                for i in self.timers:
                    if i.timerFunction.__name__ == 'lookupAdders':
                        i.count = (i.frequency - 1)
            elif cmd[0] == 'uploadBlock':
                self.blocksToUpload.append(cmd[1])
            elif cmd[0] == 'startSocket':
                # Create our own socket server
                socketInfo = json.loads(cmd[1])
                socketInfo['id'] = uuid.uuid4()
                self._core.startSocket = socketInfo
            elif cmd[0] == 'addSocket':
                # Socket server was created for us
                socketInfo = json.loads(cmd[1])
                peer = socketInfo['peer']
                reason = socketInfo['reason']
                threading.Thread(target=self.socketClient.startSocket, args=(peer, reason)).start()
            else:
                logger.info('Recieved daemonQueue command: ' + cmd[0])

            if cmd[0] not in ('', None):
                if response != '':
                    # cmd[4] carries the response routing key for the local API
                    self._core._utils.localCommand('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
            response = ''

        self.decrementThreadCount('daemonCommands')
2018-07-23 07:43:10 +00:00
    def uploadBlock(self):
        '''Upload our block to a few peers.

        Drains self.blocksToUpload: each block is POSTed to at most one random
        online peer; when inserting a block, we try to upload it to a few peers
        to add some deniability. Successfully shared blocks are removed from
        the upload list.
        '''
        triedPeers = []
        finishedUploads = []
        if len(self.blocksToUpload) != 0:
            for bl in self.blocksToUpload:
                if not self._core._utils.validateHash(bl):
                    # abort the whole pass on a malformed hash
                    logger.warn('Requested to upload invalid block')
                    self.decrementThreadCount('uploadBlock')
                    return
                # try a handful of distinct peers (at least 2 attempts)
                for i in range(max(len(self.onlinePeers), 2)):
                    peer = self.pickOnlinePeer()
                    if peer in triedPeers:
                        continue
                    triedPeers.append(peer)
                    url = 'http://' + peer + '/upload'
                    data = {'block': block.Block(bl).getRaw()}
                    proxyType = ''
                    if peer.endswith('.onion'):
                        proxyType = 'tor'
                    elif peer.endswith('.i2p'):
                        proxyType = 'i2p'
                    logger.info("Uploading block to " + peer)
                    # doPostRequest returns False on failure
                    if not self._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
                        self._core._utils.localCommand('waitforshare/' + bl)
                        finishedUploads.append(bl)
                        break
        for x in finishedUploads:
            try:
                self.blocksToUpload.remove(x)
            except ValueError:
                pass
        self.decrementThreadCount('uploadBlock')
2018-07-23 07:43:10 +00:00
2018-06-13 07:33:37 +00:00
def announce ( self , peer ) :
2018-07-02 04:04:14 +00:00
''' Announce to peers our address '''
2018-10-30 22:22:06 +00:00
if self . daemonTools . announceNode ( ) == False :
2018-08-08 19:26:02 +00:00
logger . warn ( ' Could not introduce node. ' )
2018-06-13 07:33:37 +00:00
2018-06-13 03:43:39 +00:00
    def detectAPICrash(self):
        '''Exit if the api server crashes/stops.

        Pings the local API; on failure, retries up to 5 times (1s apart) before
        firing a daemon_crash event and setting self.shutdown.
        '''
        if self._core._utils.localCommand('ping', silent=False) not in ('pong', 'pong!'):
            for i in range(5):
                if self._core._utils.localCommand('ping') in ('pong', 'pong!'):
                    break # break for loop
                time.sleep(1)
            else:
                # for-else: This executes if the api is NOT detected to be running
                events.event('daemon_crash', onionr=None, data={})
                logger.error('Daemon detected API crash (or otherwise unable to reach API after long time), stopping...')
                self.shutdown = True
        self.decrementThreadCount('detectAPICrash')
2018-06-13 07:33:37 +00:00
2018-11-10 07:17:19 +00:00
def runCheck ( self ) :
if self . daemonTools . runCheck ( ) :
logger . debug ( ' Status check; looks good. ' )
self . decrementThreadCount ( ' runCheck ' )
2018-06-12 23:32:33 +00:00
class OnionrCommunicatorTimers:
    '''A recurring task driven by the daemon's main loop.

    Registers itself with daemonInstance.timers on construction; the daemon
    calls processTimer() once per loop tick, and the task fires every
    `frequency` ticks.
    '''
    def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False):
        # timerFunction: callable to run; frequency: ticks between runs
        # makeThread/threadAmount/maxThreads: threading policy for the task
        # requiresPeer: skip firing while no peers are online
        self.timerFunction = timerFunction
        self.frequency = frequency
        self.threadAmount = threadAmount
        self.makeThread = makeThread
        self.requiresPeer = requiresPeer
        self.daemonInstance = daemonInstance
        self.maxThreads = maxThreads
        self._core = self.daemonInstance._core

        self.daemonInstance.timers.append(self)
        self.count = 0

    def processTimer(self):
        '''Advance the tick counter and fire the task when it is due.'''
        # mark how many instances of a thread we have (decremented at thread end)
        try:
            self.daemonInstance.threadCounts[self.timerFunction.__name__]
        except KeyError:
            self.daemonInstance.threadCounts[self.timerFunction.__name__] = 0
        # execute thread if it is time, and we are not missing *required* online peer.
        # BUGFIX: use >= rather than ==. With ==, a requiresPeer timer whose tick
        # count reached `frequency` while no peers were online would increment past
        # it and never fire again; with >= it simply retries each tick until a
        # peer is available, then fires and resets.
        if self.count >= self.frequency:
            if self.requiresPeer and len(self.daemonInstance.onlinePeers) == 0:
                # required peer missing: leave count as-is (>= frequency) so we retry next tick
                pass
            else:
                if self.makeThread:
                    for i in range(self.threadAmount):
                        if self.daemonInstance.threadCounts[self.timerFunction.__name__] >= self.maxThreads:
                            logger.debug('%s is currently using the maximum number of threads, not starting another.' % self.timerFunction.__name__)
                        else:
                            self.daemonInstance.threadCounts[self.timerFunction.__name__] += 1
                            newThread = threading.Thread(target=self.timerFunction)
                            newThread.start()
                else:
                    self.timerFunction()
                self.count = -1 # negative 1 because its incremented at bottom
        self.count += 1
2018-06-11 07:40:45 +00:00
2018-06-10 08:00:01 +00:00
# Module entry point: only start the daemon when invoked as `... run`.
shouldRun = False
debug = True
developmentMode = False
if config.get('general.dev_mode', True):
    developmentMode = True
try:
    if sys.argv[1] == 'run':
        shouldRun = True
except IndexError:
    # no sub-command given; do nothing
    pass
if shouldRun:
    try:
        # blocks until shutdown (the constructor runs the main loop)
        OnionrCommunicatorDaemon(debug, developmentMode)
    except Exception as e:
        logger.error('Error occured in Communicator', error=e, timestamp=False)