From 47013431d29d21c6be37844f15d137641dc983f7 Mon Sep 17 00:00:00 2001
From: Kevin
Date: Fri, 24 Jul 2020 03:24:41 -0500
Subject: [PATCH] Removed use of communicator's storagecounter to reduce coupling

---
 CHANGELOG.md                                     |  1 +
 src/communicator/__init__.py                     | 14 ++++----------
 src/communicatorutils/downloadblocks/__init__.py |  6 +++---
 src/communicatorutils/housekeeping.py            |  5 ++++-
 src/communicatorutils/lookupblocks.py            |  4 +++-
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01910064..07b55756 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 * Made storagecounter use a watchdog (inotify) instead of excessive file reads
 * Bumped urllib3 to 1.25.10
+* Removed use of communicator's storagecounter to reduce coupling
 
 ## [5.0.0] - 2020-07-23
 
diff --git a/src/communicator/__init__.py b/src/communicator/__init__.py
index c316275c..cfb8f2f7 100755
--- a/src/communicator/__init__.py
+++ b/src/communicator/__init__.py
@@ -3,9 +3,7 @@
 This file contains both the OnionrCommunicate class for communcating
 with peers and code to operate as a daemon, getting commands from the command
 queue database
-(see core.Core.daemonQueue)
 """
-import os
 import time
 
 import config
@@ -24,14 +22,10 @@ from communicatorutils import announcenode, deniableinserts
 from communicatorutils import cooldownpeer
 from communicatorutils import housekeeping
 from communicatorutils import netcheck
-from onionrutils import localcommand
 from onionrutils import epoch
 from onionrcommands.openwebinterface import get_url
 from etc import humanreadabletime
 import onionrservices
-import filepaths
-from onionrblocks import storagecounter
-from coredb import dbfiles
 from netcontroller import NetController
 from . import bootstrappeers
 from . import daemoneventhooks
@@ -62,7 +56,6 @@ class OnionrCommunicatorDaemon:
 
         # configure logger and stuff
         self.config = config
-        self.storage_counter = storagecounter.StorageCounter()
         self.isOnline = True  # Assume we're connected to the internet
         self.shared_state = shared_state  # TooManyObjects module
 
@@ -153,11 +146,13 @@ class OnionrCommunicatorDaemon:
         # Timer to reset the longest offline peer
         # so contact can be attempted again
         OnionrCommunicatorTimers(
-            self, onlinepeers.clear_offline_peer, 58, my_args=[self], max_threads=1)
+            self, onlinepeers.clear_offline_peer, 58, my_args=[self],
+            max_threads=1)
 
         # Timer to cleanup old blocks
         blockCleanupTimer = OnionrCommunicatorTimers(
-            self, housekeeping.clean_old_blocks, 20, my_args=[self], max_threads=1)
+            self, housekeeping.clean_old_blocks, 20, my_args=[self],
+            max_threads=1)
 
         # Timer to discover new peers
         OnionrCommunicatorTimers(
@@ -243,7 +238,6 @@ class OnionrCommunicatorDaemon:
                 'First run detected. Run openhome to get setup.',
                 terminal=True)
             get_url()
-
             while not config.get('onboarding.done', True) and \
                     not self.shutdown:
 
diff --git a/src/communicatorutils/downloadblocks/__init__.py b/src/communicatorutils/downloadblocks/__init__.py
index a2fca198..e7e865de 100755
--- a/src/communicatorutils/downloadblocks/__init__.py
+++ b/src/communicatorutils/downloadblocks/__init__.py
@@ -12,7 +12,7 @@ from gevent import spawn
 import onionrexceptions
 import logger
 import onionrpeers
-import communicator
+
 from communicator import peeraction
 from communicator import onlinepeers
 from onionrutils import blockmetadata
@@ -39,11 +39,12 @@ from . import shoulddownload
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 """
 
+storage_counter = storagecounter.StorageCounter()
+
 
 def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
     """Use communicator instance to download blocks in the comms's queue"""
     blacklist = onionrblacklist.OnionrBlackList()
-    storage_counter = storagecounter.StorageCounter()
     LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
     count: int = 0
     metadata_validation_result: bool = False
@@ -51,7 +52,6 @@ def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
 
     for blockHash in list(comm_inst.blockQueue):
         count += 1
-        triedQueuePeers = []  # List of peers we've tried for a block
         try:
             blockPeers = list(comm_inst.blockQueue[blockHash])
         except KeyError:
diff --git a/src/communicatorutils/housekeeping.py b/src/communicatorutils/housekeeping.py
index d2cc870d..563029b5 100755
--- a/src/communicatorutils/housekeeping.py
+++ b/src/communicatorutils/housekeeping.py
@@ -12,6 +12,7 @@ from coredb import blockmetadb, dbfiles
 import onionrstorage
 from onionrstorage import removeblock
 from onionrblocks import onionrblacklist
+from onionrblocks.storagecounter import StorageCounter
 """
     This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -27,6 +28,8 @@ from onionrblocks import onionrblacklist
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 """
 
+storage_counter = StorageCounter()
+
 
 def __remove_from_upload(comm_inst, block_hash: str):
     try:
@@ -46,7 +49,7 @@ def clean_old_blocks(comm_inst):
             __remove_from_upload(comm_inst, bHash)
             logger.info('Deleted block: %s' % (bHash,))
 
-    while comm_inst.storage_counter.is_full():
+    while storage_counter.is_full():
         try:
             oldest = blockmetadb.get_block_list()[0]
         except IndexError:
diff --git a/src/communicatorutils/lookupblocks.py b/src/communicatorutils/lookupblocks.py
index d97be65b..f86bdfef 100755
--- a/src/communicatorutils/lookupblocks.py
+++ b/src/communicatorutils/lookupblocks.py
@@ -14,6 +14,7 @@ from onionrblocks import onionrblacklist
 import onionrexceptions
 import config
 from etc import onionrvalues
+from onionrblocks.storagecounter import StorageCounter
 """
     This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -30,6 +31,7 @@ from etc import onionrvalues
 """
 
 blacklist = onionrblacklist.OnionrBlackList()
+storage_counter = StorageCounter()
 
 
 def lookup_blocks_from_communicator(comm_inst):
@@ -51,7 +53,7 @@ def lookup_blocks_from_communicator(comm_inst):
         if not comm_inst.isOnline:
             break
         # check if disk allocation is used
-        if comm_inst.storage_counter.is_full():
+        if storage_counter.is_full():
             logger.debug(
                 'Not looking up new blocks due to maximum amount of disk used')
             break