2018-01-07 08:55:44 +00:00
#!/usr/bin/env python3
'''
2018-05-15 06:43:29 +00:00
Onionr - P2P Microblogging Platform & Social network.
2018-01-17 23:37:53 +00:00
2018-05-15 06:43:29 +00:00
This file contains both the OnionrCommunicate class for communicating with peers
and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
2018-01-14 00:07:13 +00:00
'''
'''
2018-01-07 08:55:44 +00:00
This program is free software : you can redistribute it and / or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with this program . If not , see < https : / / www . gnu . org / licenses / > .
'''
2018-05-03 22:41:12 +00:00
import sqlite3 , requests , hmac , hashlib , time , sys , os , math , logger , urllib . parse , base64 , binascii , random , json , threading
2018-05-15 04:28:12 +00:00
import core , onionrutils , onionrcrypto , netcontroller , onionrproofs , config , onionrplugins as plugins
2018-05-19 22:11:51 +00:00
from onionrblockapi import Block
2018-02-04 03:44:29 +00:00
2018-01-07 08:55:44 +00:00
class OnionrCommunicate :
2018-01-15 08:03:13 +00:00
def __init__ ( self , debug , developmentMode ) :
2018-02-04 03:44:29 +00:00
'''
OnionrCommunicate
2018-01-14 00:07:13 +00:00
2018-02-04 03:44:29 +00:00
This class handles communication with nodes in the Onionr network .
2018-01-14 00:07:13 +00:00
'''
2018-03-03 04:19:01 +00:00
2018-01-13 09:03:51 +00:00
self . _core = core . Core ( )
2018-01-27 01:16:15 +00:00
self . _utils = onionrutils . OnionrUtils ( self . _core )
2018-02-21 02:44:56 +00:00
self . _crypto = onionrcrypto . OnionrCrypto ( self . _core )
2018-04-22 01:53:12 +00:00
self . _netController = netcontroller . NetController ( 0 ) # arg is the HS port but not needed rn in this file
2018-04-02 00:33:09 +00:00
2018-04-23 06:03:10 +00:00
self . newHashes = { } # use this to not keep hashes around too long if we cant get their data
2018-04-25 22:42:42 +00:00
self . keepNewHash = 12
2018-04-23 06:03:10 +00:00
self . ignoredHashes = [ ]
2018-04-02 00:33:09 +00:00
self . highFailureAmount = 7
2018-05-03 22:41:12 +00:00
2018-05-05 02:07:43 +00:00
self . communicatorThreads = 0
self . maxThreads = 75
2018-05-05 03:39:00 +00:00
self . processBlocksThreads = 0
self . lookupBlocksThreads = 0
2018-05-03 22:41:12 +00:00
self . blocksProcessing = [ ] # list of blocks currently processing, to avoid trying a block twice at once in 2 seperate threads
self . peerStatus = { } # network actions (active requests) for peers used mainly to prevent conflicting actions in threads
2018-03-03 07:37:46 +00:00
2018-05-18 06:22:16 +00:00
self . communicatorTimers = { } # communicator timers, name: rate (in seconds)
self . communicatorTimerCounts = { }
self . communicatorTimerFuncs = { }
self . registerTimer ( ' blockProcess ' , 20 )
self . registerTimer ( ' highFailure ' , 10 )
self . registerTimer ( ' heartBeat ' , 10 )
self . registerTimer ( ' pex ' , 120 )
2018-01-26 07:22:48 +00:00
logger . debug ( ' Communicator debugging enabled. ' )
2018-05-18 06:22:16 +00:00
with open ( ' data/hs/hostname ' , ' r ' ) as torID :
todID = torID . read ( )
2018-01-20 07:23:09 +00:00
2018-04-22 01:53:12 +00:00
apiRunningCheckRate = 10
apiRunningCheckCount = 0
2018-02-04 09:20:43 +00:00
self . peerData = { } # Session data for peers (recent reachability, speed, etc)
2018-01-27 03:42:20 +00:00
if os . path . exists ( self . _core . queueDB ) :
self . _core . clearDaemonQueue ( )
2018-03-03 04:19:01 +00:00
# Loads in and starts the enabled plugins
plugins . reload ( )
2018-06-05 05:26:11 +00:00
# Print nice header thing :)
2018-06-14 04:17:58 +00:00
if config . get ( ' general.display_header ' , True ) :
2018-06-05 05:26:11 +00:00
self . header ( )
2018-01-14 08:48:23 +00:00
while True :
command = self . _core . daemonQueue ( )
2018-01-25 22:39:09 +00:00
# Process blocks based on a timer
2018-05-19 22:11:51 +00:00
self . timerTick ( )
2018-05-18 06:22:16 +00:00
# TODO: migrate below if statements to be own functions which are called in the above timerTick() function
if self . communicatorTimers [ ' highFailure ' ] == self . communicatorTimerCounts [ ' highFailure ' ] :
self . communicatorTimerCounts [ ' highFailure ' ] = 0
2018-04-02 00:33:09 +00:00
for i in self . peerData :
2018-04-25 23:21:43 +00:00
if self . peerData [ i ] [ ' failCount ' ] > = self . highFailureAmount :
2018-04-02 00:33:09 +00:00
self . peerData [ i ] [ ' failCount ' ] - = 1
2018-05-18 06:22:16 +00:00
if self . communicatorTimers [ ' pex ' ] == self . communicatorTimerCounts [ ' pex ' ] :
2018-05-03 22:41:12 +00:00
pT1 = threading . Thread ( target = self . getNewPeers , name = " pT1 " )
pT1 . start ( )
pT2 = threading . Thread ( target = self . getNewPeers , name = " pT2 " )
pT2 . start ( )
2018-05-18 06:22:16 +00:00
self . communicatorTimerCounts [ ' pex ' ] = 0 # TODO: do not reset timer if low peer count
if self . communicatorTimers [ ' heartBeat ' ] == self . communicatorTimerCounts [ ' heartBeat ' ] :
2018-01-27 21:49:48 +00:00
logger . debug ( ' Communicator heartbeat ' )
2018-05-18 06:22:16 +00:00
self . communicatorTimerCounts [ ' heartBeat ' ] = 0
if self . communicatorTimers [ ' blockProcess ' ] == self . communicatorTimerCounts [ ' blockProcess ' ] :
2018-05-05 06:19:25 +00:00
lT1 = threading . Thread ( target = self . lookupBlocks , name = " lt1 " , args = ( True , ) )
lT2 = threading . Thread ( target = self . lookupBlocks , name = " lt2 " , args = ( True , ) )
lT3 = threading . Thread ( target = self . lookupBlocks , name = " lt3 " , args = ( True , ) )
lT4 = threading . Thread ( target = self . lookupBlocks , name = " lt4 " , args = ( True , ) )
pbT1 = threading . Thread ( target = self . processBlocks , name = ' pbT1 ' , args = ( True , ) )
pbT2 = threading . Thread ( target = self . processBlocks , name = ' pbT2 ' , args = ( True , ) )
pbT3 = threading . Thread ( target = self . processBlocks , name = ' pbT3 ' , args = ( True , ) )
pbT4 = threading . Thread ( target = self . processBlocks , name = ' pbT4 ' , args = ( True , ) )
2018-05-05 02:07:43 +00:00
if ( self . maxThreads - 8 ) > = threading . active_count ( ) :
lT1 . start ( )
lT2 . start ( )
lT3 . start ( )
lT4 . start ( )
pbT1 . start ( )
pbT2 . start ( )
pbT3 . start ( )
pbT4 . start ( )
2018-05-18 06:22:16 +00:00
self . communicatorTimerCounts [ ' blockProcess ' ] = 0
2018-05-05 02:07:43 +00:00
else :
logger . debug ( threading . active_count ( ) )
logger . debug ( ' Too many threads. ' )
2018-01-14 08:48:23 +00:00
if command != False :
if command [ 0 ] == ' shutdown ' :
2018-05-03 03:22:07 +00:00
logger . info ( ' Daemon received exit command. ' , timestamp = True )
2018-01-14 08:48:23 +00:00
break
2018-04-19 02:56:25 +00:00
elif command [ 0 ] == ' announceNode ' :
announceAttempts = 3
announceAttemptCount = 0
2018-04-19 01:17:47 +00:00
announceVal = False
2018-06-01 04:25:28 +00:00
logger . info ( ' Announcing node to %s ... ' % command [ 1 ] , timestamp = True )
2018-04-19 02:56:25 +00:00
while not announceVal :
announceAttemptCount + = 1
announceVal = self . performGet ( ' announce ' , command [ 1 ] , data = self . _core . hsAdder . replace ( ' \n ' , ' ' ) , skipHighFailureAddress = True )
2018-06-01 04:25:28 +00:00
# logger.info(announceVal)
2018-04-19 02:56:25 +00:00
if announceAttemptCount > = announceAttempts :
2018-06-01 04:25:28 +00:00
logger . warn ( ' Unable to announce to %s ' % command [ 1 ] )
2018-04-19 02:56:25 +00:00
break
2018-05-02 06:22:40 +00:00
elif command [ 0 ] == ' runCheck ' :
2018-06-01 04:25:28 +00:00
logger . debug ( ' Status check; looks good. ' )
2018-05-02 06:22:40 +00:00
open ( ' data/.runcheck ' , ' w+ ' ) . close ( )
2018-05-15 05:16:00 +00:00
elif command [ 0 ] == ' kex ' :
self . pexCount = pexTimer - 1
2018-05-03 03:22:07 +00:00
elif command [ 0 ] == ' event ' :
# todo
pass
elif command [ 0 ] == ' checkCallbacks ' :
try :
data = json . loads ( command [ 1 ] )
logger . info ( ' Checking for callbacks with connection %s ... ' % data [ ' id ' ] )
2018-06-14 04:17:58 +00:00
self . check_callbacks ( data , config . get ( ' general.dc_execcallbacks ' , True ) )
2018-05-03 03:22:07 +00:00
events . event ( ' incoming_direct_connection ' , data = { ' callback ' : True , ' communicator ' : self , ' data ' : data } )
except Exception as e :
logger . error ( ' Failed to interpret callbacks for checking ' , e )
elif command [ 0 ] == ' incomingDirectConnection ' :
try :
data = json . loads ( command [ 1 ] )
logger . info ( ' Handling incoming connection %s ... ' % data [ ' id ' ] )
self . incoming_direct_connection ( data )
events . event ( ' incoming_direct_connection ' , data = { ' callback ' : False , ' communicator ' : self , ' data ' : data } )
except Exception as e :
logger . error ( ' Failed to handle callbacks for checking ' , e )
2018-05-02 06:50:29 +00:00
2018-04-22 01:53:12 +00:00
apiRunningCheckCount + = 1
2018-05-02 06:50:29 +00:00
2018-04-22 01:53:12 +00:00
# check if local API is up
if apiRunningCheckCount > apiRunningCheckRate :
if self . _core . _utils . localCommand ( ' ping ' ) != ' pong ' :
for i in range ( 4 ) :
if self . _utils . localCommand ( ' ping ' ) == ' pong ' :
apiRunningCheckCount = 0
break # break for loop
time . sleep ( 1 )
else :
# This executes if the api is NOT detected to be running
2018-05-03 03:22:07 +00:00
logger . error ( ' Daemon detected API crash (or otherwise unable to reach API after long time), stopping... ' )
2018-04-22 01:53:12 +00:00
break # break main daemon loop
apiRunningCheckCount = 0
2018-04-19 01:47:35 +00:00
2018-01-14 08:48:23 +00:00
time . sleep ( 1 )
2018-05-02 06:50:29 +00:00
2018-04-22 01:53:12 +00:00
self . _netController . killTor ( )
2018-01-07 08:55:44 +00:00
return
2018-03-03 04:19:01 +00:00
2018-05-03 03:22:07 +00:00
future_callbacks = { }
connection_handlers = { }
id_peer_cache = { }
2018-05-18 06:22:16 +00:00
def registerTimer ( self , timerName , rate , timerFunc = None ) :
2018-06-01 04:25:28 +00:00
'''
Register a communicator timer
'''
2018-05-18 06:22:16 +00:00
self . communicatorTimers [ timerName ] = rate
self . communicatorTimerCounts [ timerName ] = 0
self . communicatorTimerFuncs [ timerName ] = timerFunc
2018-05-19 22:11:51 +00:00
2018-05-18 06:22:16 +00:00
def timerTick ( self ) :
2018-06-01 04:25:28 +00:00
'''
Increments timers " ticks " and calls funcs if applicable
'''
2018-05-18 06:22:16 +00:00
tName = ' '
for i in self . communicatorTimers . items ( ) :
tName = i [ 0 ]
self . communicatorTimerCounts [ tName ] + = 1
if self . communicatorTimerCounts [ tName ] == self . communicatorTimers [ tName ] :
try :
self . communicatorTimerFuncs [ tName ] ( )
except TypeError :
pass
else :
self . communicatorTimerCounts [ tName ] = 0
2018-05-03 03:22:07 +00:00
def get_connection_handlers ( self , name = None ) :
'''
Returns a list of callback handlers by name , or , if name is None , it returns all handlers .
'''
if name is None :
return self . connection_handlers
elif name in self . connection_handlers :
return self . connection_handlers [ name ]
2018-05-04 17:44:35 +00:00
else :
2018-05-03 03:22:07 +00:00
return list ( )
def add_connection_handler ( self , name , handler ) :
'''
Adds a function to be called when an connection that is NOT a callback is received .
Takes in the name of the communication type and the handler as input
'''
if not name in self . connection_handlers :
self . connection_handlers [ name ] = list ( )
self . connection_handlers [ name ] . append ( handler )
return
def remove_connection_handler ( self , name , handler = None ) :
'''
Removes a connection handler if specified , or removes all by name
'''
if handler is None :
if name in self . connection_handlers :
self . connection_handlers [ name ] . remove ( handler )
elif name in self . connection_handlers :
del self . connection_handlers [ name ]
return
def set_callback ( self , identifier , callback ) :
'''
( Over ) writes a callback by communication identifier
'''
if not callback is None :
self . future_callbacks [ identifier ] = callback
return True
return False
def unset_callback ( self , identifier ) :
'''
Unsets a callback by communication identifier , if set
'''
if identifier in future_callbacks :
del self . future_callbacks [ identifier ]
return True
return False
def get_callback ( self , identifier ) :
'''
Returns a callback by communication identifier if set , or None
'''
if identifier in self . future_callbacks :
return self . future_callbacks [ id ]
return None
def direct_connect ( self , peer , data = None , callback = None , log = True ) :
'''
Communicates something directly with the client
- ` peer ` should obviously be the peer id to request .
- ` data ` should be a dict ( NOT str ) , with the parameter " type "
ex . { ' type ' : ' sendMessage ' , ' content ' : ' hey, this is a dm ' }
In that dict , the key ' token ' must NEVER be set . If it is , it will
be overwritten .
- if ` callback ` is set to a function , it will call that function
back if / when the client the request is sent to decides to respond .
Do NOT depend on a response , because users can configure their
clients not to respond to this type of request .
- ` log ` is set to True by default - - what this does is log the
request for debug purposes . Should be False for sensitive actions .
'''
# TODO: Timing attack prevention
try :
# does not need to be secure random, only used for keeping track of async responses
# Actually, on second thought, it does need to be secure random. Otherwise, if it is predictable, someone could trigger arbitrary callbacks that have been saved on the local node, wrecking all kinds of havoc. Better just to keep it secure random.
identifier = self . _utils . token ( 32 )
if ' id ' in data :
identifier = data [ ' id ' ]
if not identifier in id_peer_cache :
id_peer_cache [ identifier ] = peer
if type ( data ) == str :
# if someone inputs a string instead of a dict, it will assume it's the type
data = { ' type ' : data }
data [ ' id ' ] = identifier
data [ ' token ' ] = ' ' # later put PoW stuff here or whatever is needed
data_str = json . dumps ( data )
events . event ( ' outgoing_direct_connection ' , data = { ' callback ' : True , ' communicator ' : self , ' data ' : data , ' id ' : identifier , ' token ' : token , ' peer ' : peer , ' callback ' : callback , ' log ' : log } )
2018-05-11 03:19:48 +00:00
logger . debug ( ' Direct connection (identifier: " %s " ): %s ' % ( identifier , data_str ) )
2018-05-03 03:22:07 +00:00
try :
self . performGet ( ' directMessage ' , peer , data_str )
except :
logger . warn ( ' Failed to connect to peer: " %s " . ' % str ( peer ) )
return False
if not callback is None :
self . set_callback ( identifier , callback )
return True
except Exception as e :
logger . warn ( ' Unknown error, failed to execute direct connect (peer: " %s " ). ' % str ( peer ) , e )
return False
def direct_connect_response ( self , identifier , data , peer = None , callback = None , log = True ) :
'''
Responds to a previous connection . Hostname will be pulled from id_peer_cache if not specified in ` peer ` parameter .
If yet another callback is requested , it can be put in the ` callback ` parameter .
'''
2018-06-14 04:17:58 +00:00
if config . get ( ' general.dc_response ' , True ) :
2018-05-03 03:22:07 +00:00
data [ ' id ' ] = identifier
data [ ' sender ' ] = open ( ' data/hs/hostname ' ) . read ( )
data [ ' callback ' ] = True
if ( origin is None ) and ( identifier in id_peer_cache ) :
origin = id_peer_cache [ identifier ]
if not identifier in id_peer_cache :
id_peer_cache [ identifier ] = peer
if origin is None :
logger . warn ( ' Failed to identify peer for connection %s ' % str ( identifier ) )
return False
else :
return self . direct_connect ( peer , data = data , callback = callback , log = log )
else :
logger . warn ( ' Node tried to respond to direct connection id %s , but it was rejected due to `dc_response` restriction. ' % str ( identifier ) )
return False
def check_callbacks ( self , data , execute = True , remove = True ) :
'''
Check if a callback is set , and if so , execute it
'''
try :
if type ( data ) is str :
data = json . loads ( data )
if ' id ' in data : # TODO: prevent enumeration, require extra PoW
identifier = data [ ' id ' ]
if identifier in self . future_callbacks :
if execute :
self . get_callback ( identifier ) ( data )
logger . debug ( ' Request callback " %s " executed. ' % str ( identifier ) )
if remove :
self . unset_callback ( identifier )
return True
logger . warn ( ' Unable to find request callback for ID " %s " . ' % str ( identifier ) )
else :
logger . warn ( ' Unable to identify callback request, `id` parameter missing: %s ' % json . dumps ( data ) )
except Exception as e :
logger . warn ( ' Unknown error, failed to execute direct connection callback (peer: " %s " ). ' % str ( peer ) , e )
return False
def incoming_direct_connection ( self , data ) :
'''
This code is run whenever there is a new incoming connection .
'''
if ' type ' in data and data [ ' type ' ] in self . connection_handlers :
for handler in self . get_connection_handlers ( name ) :
handler ( data )
return
2018-03-01 09:20:57 +00:00
def getNewPeers ( self ) :
'''
2018-05-03 22:41:12 +00:00
Get new peers and ed25519 keys
2018-03-01 09:20:57 +00:00
'''
2018-05-03 03:22:07 +00:00
2018-05-05 03:39:00 +00:00
peersCheck = 1 # Amount of peers to ask for new peers + keys
2018-03-16 15:35:37 +00:00
peersChecked = 0
peerList = list ( self . _core . listAdders ( ) ) # random ordered list of peers
newKeys = [ ]
newAdders = [ ]
2018-03-16 20:38:33 +00:00
if len ( peerList ) > 0 :
maxN = len ( peerList ) - 1
else :
peersCheck = 0
maxN = 0
2018-03-16 15:35:37 +00:00
if len ( peerList ) > peersCheck :
peersCheck = len ( peerList )
while peersCheck > peersChecked :
2018-04-25 07:09:28 +00:00
#i = secrets.randbelow(maxN) # cant use prior to 3.6
i = random . randint ( 0 , maxN )
2018-05-03 22:41:12 +00:00
try :
if self . peerStatusTaken ( peerList [ i ] , ' pex ' ) or self . peerStatusTaken ( peerList [ i ] , ' kex ' ) :
continue
except IndexError :
pass
2018-05-11 03:19:48 +00:00
logger . info ( ' Using %s to find new peers... ' % peerList [ i ] , timestamp = True )
2018-05-03 03:22:07 +00:00
2018-03-16 15:35:37 +00:00
try :
2018-04-01 05:46:34 +00:00
newAdders = self . performGet ( ' pex ' , peerList [ i ] , skipHighFailureAddress = True )
2018-05-11 03:19:48 +00:00
if not newAdders is False : # keep the is False thing in there, it might not be bool
logger . debug ( ' Attempting to merge address: %s ' % str ( newAdders ) )
self . _utils . mergeAdders ( newAdders )
2018-03-16 15:35:37 +00:00
except requests . exceptions . ConnectionError :
2018-05-11 03:19:48 +00:00
logger . info ( ' %s connection failed ' % peerList [ i ] , timestamp = True )
2018-03-16 15:35:37 +00:00
continue
else :
try :
2018-05-11 03:19:48 +00:00
logger . info ( ' Using %s to find new keys... ' % peerList [ i ] )
2018-04-01 05:46:34 +00:00
newKeys = self . performGet ( ' kex ' , peerList [ i ] , skipHighFailureAddress = True )
2018-05-11 03:19:48 +00:00
logger . debug ( ' Attempting to merge pubkey: %s ' % str ( newKeys ) )
2018-03-16 15:35:37 +00:00
# TODO: Require keys to come with POW token (very large amount of POW)
self . _utils . mergeKeys ( newKeys )
except requests . exceptions . ConnectionError :
2018-05-11 03:19:48 +00:00
logger . info ( ' %s connection failed ' % peerList [ i ] , timestamp = True )
2018-03-16 15:35:37 +00:00
continue
else :
peersChecked + = 1
2018-03-01 09:20:57 +00:00
return
2018-05-05 03:39:00 +00:00
def lookupBlocks ( self , isThread = False ) :
2018-02-04 03:44:29 +00:00
'''
Lookup blocks and merge new ones
'''
2018-05-05 03:39:00 +00:00
if isThread :
self . lookupBlocksThreads + = 1
2018-02-22 09:33:30 +00:00
peerList = self . _core . listAdders ( )
2018-05-13 06:37:47 +00:00
blockList = list ( )
2018-05-03 03:22:07 +00:00
2018-01-26 06:28:11 +00:00
for i in peerList :
2018-05-03 22:41:12 +00:00
if self . peerStatusTaken ( i , ' getBlockHashes ' ) or self . peerStatusTaken ( i , ' getDBHash ' ) :
continue
2018-04-25 22:42:42 +00:00
try :
if self . peerData [ i ] [ ' failCount ' ] > = self . highFailureAmount :
continue
except KeyError :
pass
2018-05-03 03:22:07 +00:00
2018-02-28 00:00:37 +00:00
lastDB = self . _core . getAddressInfo ( i , ' DBHash ' )
2018-05-03 03:22:07 +00:00
2018-01-28 02:03:44 +00:00
if lastDB == None :
2018-06-07 08:15:01 +00:00
logger . debug ( ' Fetching db hash from %s , no previous known. ' % str ( i ) )
2018-01-28 02:03:44 +00:00
else :
2018-06-07 08:15:01 +00:00
logger . debug ( ' Fetching db hash from %s , %s last known ' % ( str ( i ) , str ( lastDB ) ) )
2018-05-03 03:22:07 +00:00
2018-01-26 06:28:11 +00:00
currentDB = self . performGet ( ' getDBHash ' , i )
2018-05-03 03:22:07 +00:00
2018-01-28 22:29:16 +00:00
if currentDB != False :
2018-05-11 03:19:48 +00:00
logger . debug ( ' %s hash db (from request): %s ' % ( str ( i ) , str ( currentDB ) ) )
2018-01-28 22:29:16 +00:00
else :
2018-05-11 03:19:48 +00:00
logger . warn ( ' Failed to get hash db status for %s ' % str ( i ) )
2018-05-03 03:22:07 +00:00
2018-01-27 01:16:15 +00:00
if currentDB != False :
if lastDB != currentDB :
2018-05-11 03:19:48 +00:00
logger . debug ( ' Fetching hash from %s - %s current hash. ' % ( str ( i ) , currentDB ) )
2018-04-19 02:03:44 +00:00
try :
2018-05-19 21:32:21 +00:00
blockList . extend ( self . performGet ( ' getBlockHashes ' , i ) . split ( ' \n ' ) )
2018-04-19 02:03:44 +00:00
except TypeError :
2018-05-11 03:19:48 +00:00
logger . warn ( ' Failed to get data hash from %s ' % str ( i ) )
2018-04-25 23:21:43 +00:00
self . peerData [ i ] [ ' failCount ' ] - = 1
2018-01-28 01:53:24 +00:00
if self . _utils . validateHash ( currentDB ) :
2018-02-28 00:00:37 +00:00
self . _core . setAddressInfo ( i , " DBHash " , currentDB )
2018-05-03 03:22:07 +00:00
2018-05-13 06:37:47 +00:00
if len ( blockList ) != 0 :
2018-04-27 01:37:48 +00:00
pass
2018-05-03 03:22:07 +00:00
2018-01-26 06:28:11 +00:00
for i in blockList :
2018-01-29 02:54:39 +00:00
if len ( i . strip ( ) ) == 0 :
continue
2018-05-15 00:44:54 +00:00
try :
if self . _utils . hasBlock ( i ) :
continue
except :
logger . warn ( ' Invalid hash ' ) # TODO: move below validate hash check below
pass
2018-04-23 06:14:49 +00:00
if i in self . ignoredHashes :
continue
2018-05-03 03:22:07 +00:00
2018-04-19 02:18:38 +00:00
#logger.debug('Exchanged block (blockList): ' + i)
2018-01-27 01:16:15 +00:00
if not self . _utils . validateHash ( i ) :
2018-01-26 06:28:11 +00:00
# skip hash if it isn't valid
2018-05-11 03:19:48 +00:00
logger . warn ( ' Hash %s is not valid ' % str ( i ) )
2018-01-26 06:28:11 +00:00
continue
else :
2018-04-23 06:03:10 +00:00
self . newHashes [ i ] = 0
2018-05-11 03:19:48 +00:00
logger . debug ( ' Adding %s to hash database... ' % str ( i ) )
2018-01-26 06:28:11 +00:00
self . _core . addToBlockDB ( i )
2018-05-05 03:39:00 +00:00
self . lookupBlocksThreads - = 1
2018-01-26 06:28:11 +00:00
return
2018-02-04 03:44:29 +00:00
2018-05-05 03:39:00 +00:00
def processBlocks ( self , isThread = False ) :
2018-01-28 21:59:13 +00:00
'''
2018-02-04 03:44:29 +00:00
Work with the block database and download any missing blocks
This is meant to be called from the communicator daemon on its timer .
2018-01-28 21:59:13 +00:00
'''
2018-05-05 03:39:00 +00:00
if isThread :
self . processBlocksThreads + = 1
2018-05-13 03:55:34 +00:00
for i in self . _core . getBlockList ( unsaved = True ) :
2018-01-28 21:59:13 +00:00
if i != " " :
2018-05-03 22:41:12 +00:00
if i in self . blocksProcessing or i in self . ignoredHashes :
2018-05-05 02:07:43 +00:00
#logger.debug('already processing ' + i)
2018-04-23 06:03:10 +00:00
continue
2018-05-03 22:41:12 +00:00
else :
self . blocksProcessing . append ( i )
2018-04-23 06:03:10 +00:00
try :
self . newHashes [ i ]
except KeyError :
self . newHashes [ i ] = 0
2018-05-03 03:22:07 +00:00
2018-04-23 06:03:10 +00:00
# check if a new hash has been around too long, delete it from database and add it to ignore list
if self . newHashes [ i ] > = self . keepNewHash :
2018-05-11 03:19:48 +00:00
logger . warn ( ' Ignoring block %s because it took to long to get valid data. ' % str ( i ) )
2018-04-23 06:03:10 +00:00
del self . newHashes [ i ]
self . _core . removeBlock ( i )
self . ignoredHashes . append ( i )
continue
2018-05-03 03:22:07 +00:00
2018-04-23 06:03:10 +00:00
self . newHashes [ i ] + = 1
2018-05-11 03:19:48 +00:00
logger . warn ( ' Block is unsaved: %s ' % str ( i ) )
2018-01-28 22:14:19 +00:00
data = self . downloadBlock ( i )
2018-04-26 07:40:39 +00:00
2018-05-11 05:18:39 +00:00
# if block was successfully gotten (hash already verified)
2018-04-23 06:03:10 +00:00
if data :
2018-04-26 07:40:39 +00:00
del self . newHashes [ i ] # remove from probation list
# deal with block metadata
blockContent = self . _core . getData ( i )
2018-05-05 20:07:32 +00:00
try :
blockContent = blockContent . encode ( )
except AttributeError :
pass
2018-04-26 07:40:39 +00:00
try :
2018-04-27 01:15:30 +00:00
#blockMetadata = json.loads(self._core.getData(i)).split('}')[0] + '}'
2018-05-13 03:55:34 +00:00
blockMetadata = json . loads ( blockContent [ : blockContent . find ( b ' \n ' ) ] . decode ( ) )
2018-04-27 01:37:48 +00:00
try :
2018-05-05 08:03:05 +00:00
blockMeta2 = json . loads ( blockMetadata [ ' meta ' ] )
except KeyError :
blockMeta2 = { ' type ' : ' ' }
2018-04-27 01:37:48 +00:00
pass
2018-05-13 03:55:34 +00:00
blockContent = blockContent [ blockContent . find ( b ' \n ' ) + 1 : ]
2018-05-05 23:32:10 +00:00
try :
blockContent = blockContent . decode ( )
except AttributeError :
pass
2018-05-11 03:19:48 +00:00
2018-05-13 03:45:32 +00:00
if not self . _crypto . verifyPow ( blockContent , blockMeta2 ) :
2018-05-11 03:19:48 +00:00
logger . warn ( " %s has invalid or insufficient proof of work token, deleting... " % str ( i ) )
2018-05-05 21:50:15 +00:00
self . _core . removeBlock ( i )
continue
2018-04-26 07:40:39 +00:00
else :
2018-05-14 04:11:31 +00:00
if ( ( ' sig ' in blockMetadata ) and ( ' id ' in blockMeta2 ) ) : # id doesn't exist in blockMeta2, so this won't workin the first place
2018-05-15 06:16:55 +00:00
2018-05-13 03:45:32 +00:00
#blockData = json.dumps(blockMetadata['meta']) + blockMetadata[blockMetadata.rfind(b'}') + 1:]
creator = self . _utils . getPeerByHashId ( blockMeta2 [ ' id ' ] )
try :
creator = creator . decode ( )
except AttributeError :
pass
2018-05-14 04:11:31 +00:00
if self . _core . _crypto . edVerify ( blockMetadata [ ' meta ' ] + blockContent , creator , blockMetadata [ ' sig ' ] , encodedData = True ) :
2018-05-13 03:45:32 +00:00
logger . info ( ' %s was signed ' % str ( i ) )
self . _core . updateBlockInfo ( i , ' sig ' , ' true ' )
else :
logger . warn ( ' %s has an invalid signature ' % str ( i ) )
self . _core . updateBlockInfo ( i , ' sig ' , ' false ' )
2018-04-26 07:40:39 +00:00
try :
2018-05-11 03:19:48 +00:00
logger . info ( ' Block type is %s ' % str ( blockMeta2 [ ' type ' ] ) )
2018-05-05 08:03:05 +00:00
self . _core . updateBlockInfo ( i , ' dataType ' , blockMeta2 [ ' type ' ] )
2018-05-04 19:38:47 +00:00
self . removeBlockFromProcessingList ( i )
self . removeBlockFromProcessingList ( i )
2018-04-26 07:40:39 +00:00
except KeyError :
2018-04-27 01:15:30 +00:00
logger . warn ( ' Block has no type ' )
2018-04-26 07:40:39 +00:00
pass
except json . decoder . JSONDecodeError :
2018-04-27 01:15:30 +00:00
logger . warn ( ' Could not decode block metadata ' )
2018-05-04 19:38:47 +00:00
self . removeBlockFromProcessingList ( i )
2018-05-05 03:39:00 +00:00
self . processBlocksThreads - = 1
2018-01-28 21:59:13 +00:00
return
2018-02-04 03:44:29 +00:00
2018-05-04 19:38:47 +00:00
def removeBlockFromProcessingList ( self , block ) :
2018-06-01 04:25:28 +00:00
'''
Remove a block from the processing list
'''
2018-05-19 21:32:21 +00:00
try :
self . blocksProcessing . remove ( block )
except ValueError :
return False
else :
return True
2018-05-04 19:38:47 +00:00
2018-04-25 22:42:42 +00:00
def downloadBlock ( self , hash , peerTries = 3 ) :
2018-02-04 03:44:29 +00:00
'''
Download a block from random order of peers
'''
2018-05-03 03:22:07 +00:00
2018-04-23 06:03:10 +00:00
retVal = False
2018-02-22 09:33:30 +00:00
peerList = self . _core . listAdders ( )
2018-01-28 22:14:19 +00:00
blocks = ' '
2018-04-25 22:42:42 +00:00
peerTryCount = 0
2018-05-03 03:22:07 +00:00
2018-01-28 22:14:19 +00:00
for i in peerList :
2018-05-05 02:09:54 +00:00
try :
if self . peerData [ i ] [ ' failCount ' ] > = self . highFailureAmount :
continue
except KeyError :
pass
2018-04-25 22:42:42 +00:00
if peerTryCount > = peerTries :
break
2018-05-03 03:22:07 +00:00
2018-01-28 22:14:19 +00:00
hasher = hashlib . sha3_256 ( )
2018-04-25 04:04:12 +00:00
data = self . performGet ( ' getData ' , i , hash , skipHighFailureAddress = True )
2018-05-03 03:22:07 +00:00
2018-04-23 06:03:10 +00:00
if data == False or len ( data ) > 10000000 or data == ' ' :
2018-04-25 22:42:42 +00:00
peerTryCount + = 1
2018-04-23 01:59:44 +00:00
continue
2018-05-03 03:22:07 +00:00
2018-04-23 01:43:17 +00:00
try :
2018-04-23 02:13:55 +00:00
data = base64 . b64decode ( data )
2018-04-23 01:43:17 +00:00
except binascii . Error :
data = b ' '
2018-05-03 03:22:07 +00:00
2018-04-23 01:56:20 +00:00
hasher . update ( data )
2018-01-29 02:02:16 +00:00
digest = hasher . hexdigest ( )
2018-05-03 03:22:07 +00:00
2018-01-29 02:12:36 +00:00
if type ( digest ) is bytes :
2018-01-29 02:02:16 +00:00
digest = digest . decode ( )
2018-05-03 03:22:07 +00:00
2018-01-29 02:02:16 +00:00
if digest == hash . strip ( ) :
2018-01-28 22:14:19 +00:00
self . _core . setData ( data )
2018-05-11 03:19:48 +00:00
logger . info ( ' Successfully obtained data for %s ' % str ( hash ) , timestamp = True )
2018-04-23 06:03:10 +00:00
retVal = True
2018-04-27 01:15:30 +00:00
break
2018-01-28 22:45:22 +00:00
else :
2018-05-11 03:19:48 +00:00
logger . warn ( " Failed to validate %s -- hash calculated was %s " % ( hash , digest ) )
2018-04-25 22:42:42 +00:00
peerTryCount + = 1
2018-01-26 07:22:48 +00:00
2018-04-23 06:03:10 +00:00
return retVal
2018-05-11 03:19:48 +00:00
2018-02-04 03:44:29 +00:00
def urlencode ( self , data ) :
'''
URL encodes the data
'''
return urllib . parse . quote_plus ( data )
2018-05-19 21:32:21 +00:00
def performGet ( self , action , peer , data = None , skipHighFailureAddress = False , selfCheck = True ) :
2018-02-04 03:44:29 +00:00
'''
Performs a request to a peer through Tor or i2p ( currently only Tor )
'''
2018-03-03 04:19:01 +00:00
2018-05-19 21:32:21 +00:00
if not peer . endswith ( ' .onion ' ) and not peer . endswith ( ' .onion/ ' ) and not peer . endswith ( ' .b32.i2p ' ) :
raise PeerError ( ' Currently only Tor/i2p .onion/.b32.i2p peers are supported. You must manually specify .onion/.b32.i2p ' )
2018-05-02 06:01:20 +00:00
2018-04-22 23:35:00 +00:00
if len ( self . _core . hsAdder . strip ( ) ) == 0 :
raise Exception ( " Could not perform self address check in performGet due to not knowing our address " )
if selfCheck :
if peer . replace ( ' / ' , ' ' ) == self . _core . hsAdder :
2018-05-11 03:19:48 +00:00
logger . warn ( ' Tried to performGet to own hidden service, but selfCheck was not set to false ' )
2018-04-22 23:35:00 +00:00
return
2018-02-04 09:20:43 +00:00
# Store peer in peerData dictionary (non permanent)
if not peer in self . peerData :
2018-05-18 06:22:16 +00:00
self . peerData [ peer ] = { ' connectCount ' : 0 , ' failCount ' : 0 , ' lastConnectTime ' : self . _utils . getEpoch ( ) }
2018-01-21 01:02:56 +00:00
socksPort = sys . argv [ 2 ]
2018-01-28 01:53:24 +00:00
''' We use socks5h to use tor as DNS '''
2018-05-19 21:32:21 +00:00
if peer . endswith ( ' onion ' ) :
proxies = { ' http ' : ' socks5h://127.0.0.1: ' + str ( socksPort ) , ' https ' : ' socks5h://127.0.0.1: ' + str ( socksPort ) }
elif peer . endswith ( ' b32.i2p ' ) :
proxies = { ' http ' : ' http://127.0.0.1:4444 ' }
2018-01-21 01:02:56 +00:00
headers = { ' user-agent ' : ' PyOnionr ' }
2018-02-04 05:22:34 +00:00
url = ' http:// ' + peer + ' /public/?action= ' + self . urlencode ( action )
2018-05-19 21:32:21 +00:00
2018-01-21 01:02:56 +00:00
if data != None :
2018-02-04 05:22:34 +00:00
url = url + ' &data= ' + self . urlencode ( data )
2018-01-24 05:28:43 +00:00
try :
2018-04-02 00:33:09 +00:00
if skipHighFailureAddress and self . peerData [ peer ] [ ' failCount ' ] > self . highFailureAmount :
2018-04-01 05:46:34 +00:00
retData = False
2018-05-11 03:19:48 +00:00
logger . debug ( ' Skipping %s because of high failure rate. ' % peer )
2018-04-01 05:46:34 +00:00
else :
2018-05-03 22:41:12 +00:00
self . peerStatus [ peer ] = action
2018-05-11 03:19:48 +00:00
logger . debug ( ' Contacting %s on port %s ' % ( peer , str ( socksPort ) ) )
2018-05-19 21:32:21 +00:00
try :
r = requests . get ( url , headers = headers , proxies = proxies , allow_redirects = False , timeout = ( 15 , 30 ) )
except ValueError :
proxies = { ' http ' : ' socks5://127.0.0.1: ' + str ( socksPort ) , ' https ' : ' socks5://127.0.0.1: ' + str ( socksPort ) }
r = requests . get ( url , headers = headers , proxies = proxies , allow_redirects = False , timeout = ( 15 , 30 ) )
2018-04-01 05:46:34 +00:00
retData = r . text
2018-01-28 01:53:24 +00:00
except requests . exceptions . RequestException as e :
2018-06-01 04:25:28 +00:00
logger . debug ( ' %s failed with peer %s ' % ( action , peer ) )
logger . debug ( ' Error: %s ' % str ( e ) )
2018-02-04 09:20:43 +00:00
retData = False
2018-02-22 06:42:02 +00:00
2018-02-04 09:20:43 +00:00
if not retData :
self . peerData [ peer ] [ ' failCount ' ] + = 1
else :
self . peerData [ peer ] [ ' connectCount ' ] + = 1
2018-04-02 00:33:09 +00:00
self . peerData [ peer ] [ ' failCount ' ] - = 1
2018-05-18 06:22:16 +00:00
self . peerData [ peer ] [ ' lastConnectTime ' ] = self . _utils . getEpoch ( )
self . _core . setAddressInfo ( peer , ' lastConnect ' , self . _utils . getEpoch ( ) )
2018-02-04 09:20:43 +00:00
return retData
2018-05-11 03:19:48 +00:00
2018-05-03 22:41:12 +00:00
def peerStatusTaken ( self , peer , status ) :
'''
2018-05-18 06:22:16 +00:00
Returns if we are currently performing a specific action with a peer .
2018-05-03 22:41:12 +00:00
'''
try :
if self . peerStatus [ peer ] == status :
return True
except KeyError :
pass
return False
2018-01-20 00:59:05 +00:00
2018-06-05 05:26:11 +00:00
def header ( self , message = logger . colors . fg . pink + logger . colors . bold + ' Onionr ' + logger . colors . reset + logger . colors . fg . pink + ' has started. ' ) :
if os . path . exists ( ' static-data/header.txt ' ) :
with open ( ' static-data/header.txt ' , ' rb ' ) as file :
# only to stdout, not file or log or anything
2018-06-05 06:17:17 +00:00
print ( file . read ( ) . decode ( ) . replace ( ' P ' , logger . colors . fg . pink ) . replace ( ' W ' , logger . colors . reset + logger . colors . bold ) . replace ( ' G ' , logger . colors . fg . green ) . replace ( ' \n ' , logger . colors . reset + ' \n ' ) )
2018-06-05 05:26:11 +00:00
logger . info ( logger . colors . fg . lightgreen + ' -> ' + str ( message ) + logger . colors . reset + logger . colors . fg . lightgreen + ' <- \n ' )
2018-01-14 00:07:13 +00:00
# Daemon entry point: only start communicating when invoked as `... run`.
debug = True
developmentMode = bool(config.get('general.dev_mode', True))

# Guard against a missing argv[1] instead of letting IndexError escape
shouldRun = len(sys.argv) > 1 and sys.argv[1] == 'run'

if shouldRun:
    try:
        OnionrCommunicate(debug, developmentMode)
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C
        sys.exit(1)