#!/usr/bin/env python3
'''
    Onionr - Private P2P Communication

    This file contains both the OnionrCommunicatorDaemon class for communicating with peers
    and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
'''
'''
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sys, os, time
import core, config, logger, onionr
import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
from communicatorutils import servicecreator, onionrcommunicatortimers
from communicatorutils import downloadblocks, lookupblocks, lookupadders
from communicatorutils import connectnewpeers, uploadblocks
from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
from communicatorutils import cooldownpeer, housekeeping, netcheck
from onionrutils import localcommand, epoch, basicrequests
from etc import humanreadabletime
import onionrservices, onionrproofs

OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers

config.reload()

class OnionrCommunicatorDaemon:
    def __init__(self, onionrInst, proxyPort, developmentMode=config.get('general.dev_mode', False)):
        onionrInst.communicatorInst = self
        # configure logger and stuff
        onionr.Onionr.setupConfig('data/', self=self)
        self.proxyPort = proxyPort

        self.isOnline = True # Assume we're connected to the internet

        # list of timer instances
        self.timers = []

        # initialize core with Tor socks port being 3rd argument
        self.proxyPort = proxyPort
        self._core = onionrInst.onionrCore

        self.blocksToUpload = []

        # loop time.sleep delay in seconds
        self.delay = 1

        # lists of connected peers and peers we know we can't reach currently
        self.onlinePeers = []
        self.offlinePeers = []
        self.cooldownPeer = {}
        self.connectTimes = {}
        self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)
        self.newPeers = [] # Peers merged to us. Don't add to db until we know they're reachable
        self.announceProgress = {}
        self.announceCache = {}

        # amount of threads running by name, used to prevent too many
        self.threadCounts = {}

        # set true when shutdown command received
        self.shutdown = False

        # list of new blocks to download, added to when new block lists are fetched from peers
        self.blockQueue = {}

        # list of blocks currently downloading, to avoid duplicate simultaneous downloads
        self.currentDownloading = []

        # timestamp when the last online node was seen
        self.lastNodeSeen = None

        # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
        self.dbTimestamps = {}

        # Clear the daemon queue for any dead messages
        if os.path.exists(self._core.queueDB):
            self._core.clearDaemonQueue()

        # Loads in and starts the enabled plugins
        plugins.reload()

        # time app started running for info/statistics purposes
        self.startTime = epoch.get_epoch()

        if developmentMode:
            OnionrCommunicatorTimers(self, self.heartbeat, 30)

        # Set timers, function reference, seconds
        # requiresPeer True means the timer function won't fire if we have no connected peers
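        # Judging from the calls below, each OnionrCommunicatorTimers(self, func, frequency, ...)
        # registers func to be run from the main loop roughly every `frequency` seconds:
        # myArgs is a list of positional arguments passed to func, maxThreads caps how many
        # instances of that function may run at once, and requiresPeer skips the run while no
        # peers are online. A hypothetical timer that pinged peers every two minutes might look like:
        #   OnionrCommunicatorTimers(self, self.somePingFunction, 120, requiresPeer=True, maxThreads=1)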
        peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
        OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)

        # Timers to periodically lookup new blocks and download them
        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)

        # Timer to reset the longest offline peer so contact can be attempted again
        OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)

        # Timer to cleanup old blocks
        blockCleanupTimer = OnionrCommunicatorTimers(self, housekeeping.clean_old_blocks, 65, myArgs=[self])

        # Timer to discover new peers
        OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)

        # Timer for adjusting which peers we actively communicate to at any given time, to avoid over-using peers
        OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30, myArgs=[self], requiresPeer=True)

        # Timer to read the upload queue and upload the entries to peers
        OnionrCommunicatorTimers(self, self.uploadBlock, 5, requiresPeer=True, maxThreads=1)

        # Timer to process the daemon command queue
        OnionrCommunicatorTimers(self, self.daemonCommands, 6, maxThreads=3)

        # Timer that kills Onionr if the API server crashes
        #OnionrCommunicatorTimers(self, self.detectAPICrash, 30, maxThreads=1)

        # Setup direct connections
        if config.get('general.socket_servers', False):
            self.services = onionrservices.OnionrServices(self._core)
            self.active_services = []
            self.service_greenlets = []
            OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=[self])
        else:
            self.services = None

        # This timer creates deniable blocks, in an attempt to further obfuscate block insertion metadata
        if config.get('general.insert_deniable_blocks', True):
            deniableBlockTimer = OnionrCommunicatorTimers(self, deniableinserts.insert_deniable_block, 180, myArgs=[self], requiresPeer=True, maxThreads=1)
            deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

        # Timer to check for connectivity, through Tor to various high-profile onion services
        netCheckTimer = OnionrCommunicatorTimers(self, netcheck.net_check, 600, myArgs=[self])

        # Announce the public API server transport address to other nodes if security level allows
        if config.get('general.security_level', 1) == 0 and config.get('general.announce_node', True):
            # Default to high security level in case config breaks
            announceTimer = OnionrCommunicatorTimers(self, announcenode.announce_node, 3600, myArgs=[self], requiresPeer=True, maxThreads=1)
            announceTimer.count = (announceTimer.frequency - 120)
        else:
            logger.debug('Will not announce node.')

        # Timer to delete malfunctioning or long-dead peers
        cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)

        # Timer to cleanup dead ephemeral forward secrecy keys
        forwardSecrecyTimer = OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15, myArgs=[self], maxThreads=1)

        # Adjust initial timer triggers
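        # The .count / .frequency adjustments here (and for deniableBlockTimer/announceTimer above)
        # appear to pre-load a timer's elapsed counter so its first run happens shortly after
        # startup rather than a full interval later; e.g. a frequency of 60 with count set to 59
        # should fire about one second into the main loop.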
        peerPoolTimer.count = (peerPoolTimer.frequency - 1)
        cleanupTimer.count = (cleanupTimer.frequency - 60)
        blockCleanupTimer.count = (blockCleanupTimer.frequency - 5)

        # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
        try:
            while not self.shutdown:
                for i in self.timers:
                    if self.shutdown:
                        break
                    i.processTimer()
                time.sleep(self.delay)
                # Debug to print out used FDs (regular and net)
                #proc = psutil.Process()
                #print(proc.open_files(), len(psutil.net_connections()))
        except KeyboardInterrupt:
            self.shutdown = True
            pass

        logger.info('Goodbye. (Onionr is cleaning up, and will exit)', terminal=True)
        try:
            self.service_greenlets
        except AttributeError:
            pass
        else:
            for server in self.service_greenlets:
                server.stop()
        localcommand.local_command(self._core, 'shutdown') # shutdown the api
        time.sleep(0.5)

    def lookupAdders(self):
        '''Lookup new peer addresses'''
        lookupadders.lookup_new_peer_transports_with_communicator(self)

    def lookupBlocks(self):
        '''Lookup new blocks & add them to download queue'''
        lookupblocks.lookup_blocks_from_communicator(self)

    def getBlocks(self):
        '''download new blocks in queue'''
        downloadblocks.download_blocks_from_communicator(self)

    def decrementThreadCount(self, threadName):
        '''Decrement amount of a thread name if more than zero, called when a function meant to be run in a thread ends'''
        try:
            if self.threadCounts[threadName] > 0:
                self.threadCounts[threadName] -= 1
        except KeyError:
            pass

    def pickOnlinePeer(self):
        '''randomly picks peer from pool without bias (using secrets module)'''
        retData = ''
        while True:
            peerLength = len(self.onlinePeers)
            if peerLength <= 0:
                break
            try:
                # get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
                retData = self.onlinePeers[self._core._crypto.secrets.randbelow(peerLength)]
            except IndexError:
                pass
            else:
                break
        return retData
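
    # pickOnlinePeer uses secrets.randbelow (reached here via self._core._crypto.secrets), which
    # returns a uniformly distributed index in [0, peerLength) drawn from the operating system's
    # CSPRNG, so peer selection is unbiased and not predictable from a seeded PRNG state.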

    def clearOfflinePeer(self):
        '''Removes the longest offline peer to retry later'''
        try:
            removed = self.offlinePeers.pop(0)
        except IndexError:
            pass
        else:
            logger.debug('Removed ' + removed + ' from offline list, will try them again.')
        self.decrementThreadCount('clearOfflinePeer')

    def getOnlinePeers(self):
        '''
            Manages the self.onlinePeers attribute list, connects to more peers if we have none connected
        '''
        logger.debug('Refreshing peer pool...')
        maxPeers = int(config.get('peers.max_connect', 10))
        needed = maxPeers - len(self.onlinePeers)

        for i in range(needed):
            if len(self.onlinePeers) == 0:
                self.connectNewPeer(useBootstrap=True)
            else:
                self.connectNewPeer()

            if self.shutdown:
                break
        else:
            if len(self.onlinePeers) == 0:
                logger.debug('Couldn\'t connect to any peers.' + (' Last node seen %s ago.' % humanreadabletime.human_readable_time(time.time() - self.lastNodeSeen) if not self.lastNodeSeen is None else ''), terminal=True)
            else:
                self.lastNodeSeen = time.time()
        self.decrementThreadCount('getOnlinePeers')

    def addBootstrapListToPeerList(self, peerList):
        '''
            Add the bootstrap list to the peer list (no duplicates)
        '''
        for i in self._core.bootstrapList:
            if i not in peerList and i not in self.offlinePeers and i != self._core.hsAddress and len(str(i).strip()) > 0:
                peerList.append(i)
                self._core.addAddress(i)

    def connectNewPeer(self, peer='', useBootstrap=False):
        '''Adds a new random online peer to self.onlinePeers'''
        connectnewpeers.connect_new_peer_to_communicator(self, peer, useBootstrap)

    def removeOnlinePeer(self, peer):
        '''Remove an online peer'''
        try:
            del self.connectTimes[peer]
        except KeyError:
            pass
        try:
            del self.dbTimestamps[peer]
        except KeyError:
            pass
        try:
            self.onlinePeers.remove(peer)
        except ValueError:
            pass

    def peerCleanup(self):
        '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
        onionrpeers.peerCleanup(self._core)
        self.decrementThreadCount('peerCleanup')

    def printOnlinePeers(self):
        '''logs online peer list'''
        if len(self.onlinePeers) == 0:
            logger.warn('No online peers', terminal=True)
        else:
            logger.info('Online peers:', terminal=True)
            for i in self.onlinePeers:
                score = str(self.getPeerProfileInstance(i).score)
                logger.info(i + ', score: ' + score, terminal=True)

    def peerAction(self, peer, action, data='', returnHeaders=False):
        '''Perform a get request to a peer'''
        if len(peer) == 0:
            return False
        #logger.debug('Performing ' + action + ' with ' + peer + ' on port ' + str(self.proxyPort))
        url = 'http://%s/%s' % (peer, action)
        if len(data) > 0:
            url += '&data=' + data

        self._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer

        retData = basicrequests.do_get_request(self._core, url, port=self.proxyPort)
        # if the request failed (error), mark the peer offline
        if retData == False:
            try:
                self.getPeerProfileInstance(peer).addScore(-10)
                self.removeOnlinePeer(peer)
                if action != 'ping' and not self.shutdown:
                    logger.warn('Lost connection to ' + peer, terminal=True)
                    self.getOnlinePeers() # Will only add a new peer to pool if needed
            except ValueError:
                pass
        else:
            self._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
            self.getPeerProfileInstance(peer).addScore(1)
        return retData # If returnHeaders, returns tuple of data, headers. if not, just data string
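
    # Illustrative peerAction usage (hypothetical peer address; 'ping' is the only action name
    # referenced in this file): a False return means the request failed and the peer has already
    # been penalized and dropped from the online pool above.
    #   if self.peerAction(somePeerAddress, 'ping') == False:
    #       logger.debug('Could not reach ' + somePeerAddress)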

    def getPeerProfileInstance(self, peer):
        '''Gets a peer profile instance from the list of profiles, by address name'''
        for i in self.peerProfiles:
            # if the peer's profile is already loaded, return that
            if i.address == peer:
                retData = i
                break
        else:
            # if the peer's profile is not loaded, return a new one. connectNewPeer adds it to the list on connect
            retData = onionrpeers.PeerProfiles(peer, self._core)
        return retData

    def getUptime(self):
        '''Return the number of seconds the communicator has been running'''
        return epoch.get_epoch() - self.startTime

    def heartbeat(self):
        '''Show a heartbeat debug message'''
        logger.debug('Heartbeat. Node running for %s.' % humanreadabletime.human_readable_time(self.getUptime()))
        self.decrementThreadCount('heartbeat')

    def daemonCommands(self):
        '''
            Process daemon commands from daemonQueue
        '''
        daemonqueuehandler.handle_daemon_commands(self)
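
    # Commands reach daemonCommands through the queue database mentioned in the module docstring
    # (core.Core.daemonQueue). For example, a 'shutdown' command queued there by another part of
    # Onionr is presumably what daemonqueuehandler.handle_daemon_commands picks up on the next
    # 6-second tick and turns into self.shutdown = True (see the attribute comment in __init__).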

    def uploadBlock(self):
        '''Upload our block to a few peers'''
        uploadblocks.upload_blocks_from_communicator(self)

    def announce(self, peer):
        '''Announce to peers our address'''
        if announcenode.announce_node(self) == False:
            logger.warn('Could not introduce node.', terminal=True)

    def detectAPICrash(self):
        '''exit if the api server crashes/stops'''
        if localcommand.local_command(self._core, 'ping', silent=False) not in ('pong', 'pong!'):
            for i in range(300):
                if localcommand.local_command(self._core, 'ping') in ('pong', 'pong!') or self.shutdown:
                    break # break for loop
                time.sleep(1)
            else:
                # This executes if the api is NOT detected to be running
                events.event('daemon_crash', onionr=self._core.onionrInst, data={})
                logger.fatal('Daemon detected API crash (or otherwise unable to reach API after long time), stopping...', terminal=True)
                self.shutdown = True
        self.decrementThreadCount('detectAPICrash')
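
    # detectAPICrash (currently disabled in the timer setup above) polls the local API with 'ping'
    # for up to 300 one-second attempts; the for/else only reaches the crash handling when the
    # loop finishes without a successful ping or a shutdown request.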

    def runCheck(self):
        if run_file_exists(self):
            logger.debug('Status check; looks good.')

        self.decrementThreadCount('runCheck')


def startCommunicator(onionrInst, proxyPort):
    OnionrCommunicatorDaemon(onionrInst, proxyPort)
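
# run_file_exists consumes a '.runcheck' marker file in the data directory; the file is presumably
# written by another Onionr component to request a liveness confirmation, and runCheck (on its
# 2-second timer above) removes it and logs a status line in response.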
def run_file_exists(daemon):
    if os.path.isfile(daemon._core.dataDir + '.runcheck'):
        os.remove(daemon._core.dataDir + '.runcheck')
        return True
    return False