work on user connections

Kevin Froman 2018-08-31 17:53:48 -05:00
parent 716fb7335c
commit 1b16c809fd
3 changed files with 55 additions and 7 deletions

View File

@@ -51,6 +51,8 @@ class OnionrCommunicatorDaemon:
         # lists of connected peers and peers we know we can't reach currently
         self.onlinePeers = []
         self.offlinePeers = []
+        self.cooldownPeer = {}
+        self.connectTimes = {}
         self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)
         # amount of threads running by name, used to prevent too many
@@ -84,13 +86,14 @@ class OnionrCommunicatorDaemon:
         # requiresPeer True means the timer function won't fire if we have no connected peers
         OnionrCommunicatorTimers(self, self.daemonCommands, 5)
         OnionrCommunicatorTimers(self, self.detectAPICrash, 5)
-        peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60)
+        peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
         OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
         OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True)
         OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
         OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
         OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
+        OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
         netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
         announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1)
         cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
@@ -295,7 +298,7 @@ class OnionrCommunicatorDaemon:
         '''Manages the self.onlinePeers attribute list, connects to more peers if we have none connected'''
         logger.info('Refreshing peer pool.')
-        maxPeers = config.get('peers.maxConnect')
+        maxPeers = int(config.get('peers.maxConnect'))
         needed = maxPeers - len(self.onlinePeers)
         for i in range(needed):
@@ -338,7 +341,7 @@ class OnionrCommunicatorDaemon:
         for address in peerList:
             if not config.get('tor.v3onions') and len(address) == 62:
                 continue
-            if len(address) == 0 or address in tried or address in self.onlinePeers:
+            if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
                 continue
             if self.shutdown:
                 return
@@ -347,6 +350,7 @@ class OnionrCommunicatorDaemon:
                 time.sleep(0.1)
                 if address not in self.onlinePeers:
                     self.onlinePeers.append(address)
+                    self.connectTimes[address] = self._core._utils.getEpoch()
                 retData = address
                 # add peer to profile list if they're not in it
@@ -361,6 +365,17 @@ class OnionrCommunicatorDaemon:
                 logger.debug('Failed to connect to ' + address)
         return retData

+    def removeOnlinePeer(self, peer):
+        '''Remove an online peer'''
+        try:
+            del self.connectTimes[peer]
+        except KeyError:
+            pass
+        try:
+            self.onlinePeers.remove(peer)
+        except ValueError:
+            pass
+
     def peerCleanup(self):
         '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
         onionrpeers.peerCleanup(self._core)
@@ -392,7 +407,7 @@ class OnionrCommunicatorDaemon:
         if retData == False:
             try:
                 self.getPeerProfileInstance(peer).addScore(-10)
-                self.onlinePeers.remove(peer)
+                self.removeOnlinePeer(peer)
                 self.getOnlinePeers() # Will only add a new peer to pool if needed
             except ValueError:
                 pass
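
Note: taken together, the changes in this file add per-peer connection bookkeeping to the communicator. connectTimes records the epoch at which each peer was connected, removeOnlinePeer() becomes the single path for dropping a peer (clearing both onlinePeers and connectTimes), and the connection loop now skips any address currently in cooldownPeer. Below is a minimal standalone sketch of that invariant using only the standard library; PeerPool, epoch and addOnlinePeer are illustrative names, not Onionr API.

import time

def epoch():
    return int(time.time())

class PeerPool:
    def __init__(self):
        self.onlinePeers = []    # currently connected peer addresses
        self.connectTimes = {}   # address -> epoch of the successful connect
        self.cooldownPeer = {}   # address -> epoch the peer entered cooldown

    def addOnlinePeer(self, address):
        # mirrors the new connection-loop checks: cooled-down or
        # already-connected addresses are skipped
        if address in self.cooldownPeer or address in self.onlinePeers:
            return False
        self.onlinePeers.append(address)
        self.connectTimes[address] = epoch()
        return True

    def removeOnlinePeer(self, address):
        # mirrors removeOnlinePeer(): drop both records, tolerate absence
        self.connectTimes.pop(address, None)
        try:
            self.onlinePeers.remove(address)
        except ValueError:
            pass

Routing every removal through one helper keeps connectTimes from accumulating stale entries that would otherwise skew the cooldown selection added in the next file.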

View File

@@ -17,7 +17,7 @@
     You should have received a copy of the GNU General Public License
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
-import onionrexceptions, onionrpeers, onionrproofs, base64, logger
+import onionrexceptions, onionrpeers, onionrproofs, base64, logger, secrets
 class DaemonTools:
     def __init__(self, daemon):
         self.daemon = daemon
@@ -70,4 +70,35 @@ class DaemonTools:
             self.daemon._core._blacklist.addToDB(oldest)
             self.daemon._core.removeBlock(oldest)
             logger.info('Deleted block: %s' % (oldest,))
         self.daemon.decrementThreadCount('cleanOldBlocks')
+
+    def cooldownPeer(self):
+        '''Randomly add an online peer to cooldown, so we can connect a new one'''
+        onlinePeerAmount = len(self.daemon.onlinePeers)
+        minTime = 300
+        cooldownTime = 600
+        toCool = ''
+        tempConnectTimes = dict(self.daemon.connectTimes)
+
+        # Remove peers from cooldown that have been there long enough
+        tempCooldown = dict(self.daemon.cooldownPeer)
+        for peer in tempCooldown:
+            if (self.daemon._core._utils.getEpoch() - tempCooldown[peer]) >= cooldownTime:
+                del self.daemon.cooldownPeer[peer]
+
+        # Cool down a peer, if we have max connections alive for long enough
+        if onlinePeerAmount >= self.daemon._core.config.get('peers.maxConnect'):
+            finding = True
+            while finding:
+                try:
+                    toCool = min(tempConnectTimes, key=tempConnectTimes.get)
+                    if (self.daemon._core._utils.getEpoch() - tempConnectTimes[toCool]) < minTime:
+                        del tempConnectTimes[toCool]
+                    else:
+                        finding = False
+                except ValueError:
+                    break
+            else:
+                self.daemon.removeOnlinePeer(toCool)
+                self.daemon.cooldownPeer[toCool] = self.daemon._core._utils.getEpoch()
+        self.daemon.decrementThreadCount('cooldownPeer')
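
Note: despite the docstring, the selection here is not random. min(tempConnectTimes, key=tempConnectTimes.get) returns the address with the smallest connect epoch, i.e. the longest-connected peer. As formatted above, the else clause belongs to the while loop, so a peer is cooled only when the loop ends without hitting the ValueError break, which happens once a peer connected for at least minTime (300 s) is found; cooled peers are released again after cooldownTime (600 s). A standalone sketch of just that selection step follows, assuming connectTimes maps address -> connect epoch; pick_peer_to_cool is an illustrative helper, not the committed method.

import time

def pick_peer_to_cool(connectTimes, minTime=300, now=None):
    '''Return the longest-connected peer if it has been online for at least minTime, else None.'''
    now = int(time.time()) if now is None else now
    candidates = dict(connectTimes)
    while candidates:
        oldest = min(candidates, key=candidates.get)  # smallest epoch = connected longest
        if now - candidates[oldest] >= minTime:
            return oldest
        del candidates[oldest]  # connected too recently; check the next-oldest
    return None

# Only 'b' has been connected long enough (900 s >= 300 s) to be cooled down
print(pick_peer_to_cool({'a': 950, 'b': 100}, now=1000))  # -> 'b'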

View File

@@ -90,13 +90,15 @@ def peerCleanup(coreInst):
             if PeerProfiles(address, coreInst).score < minScore:
                 coreInst.removeAddress(address)
                 try:
-                    if (coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 'dateSeen')) >= 600:
+                    if (int(coreInst._utils.getEpoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
                         expireTime = 600
                     else:
                         expireTime = 86400
                     coreInst._blacklist.addToDB(address, dataType=1, expire=expireTime)
                 except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue
                     pass
+                except ValueError:
+                    pass
                 logger.warn('Removed address ' + address + '.')

     # Unban probably not malicious peers TODO improve
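
Note: the int() casts make the dateSeen arithmetic safe when the stored value comes back as a string, and the new except ValueError keeps peerCleanup() from dying on an unparsable value. The resulting policy: a low-score address last seen 600 s or more ago gets a short (600 s) blacklist entry, while one seen recently is blacklisted for a full day (86400 s). A standalone sketch of that decision follows; expire_for is an illustrative helper, not part of onionrpeers.

def expire_for(date_seen, now):
    '''Blacklist recently seen low-score peers for a day, stale ones only briefly.'''
    try:
        stale = (int(now) - int(date_seen)) >= 600
    except ValueError:
        # unparsable dateSeen: like the new `except ValueError: pass`,
        # skip the blacklist entry entirely
        return None
    return 600 if stale else 86400

assert expire_for('100', 1000) == 600    # last seen 900 s ago -> short ban
assert expire_for(950, 1000) == 86400    # seen recently -> day-long ban
assert expire_for('n/a', 1000) is None   # bad data -> no blacklist entry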