From 8163292ed92f1882c58025aee0340f1fa1a9f9ca Mon Sep 17 00:00:00 2001 From: Kevin Froman Date: Wed, 17 Jul 2019 17:41:33 -0500 Subject: [PATCH] progress in removing onionr core --- onionr/communicator/__init__.py | 3 +- .../communicatorutils/daemonqueuehandler.py | 3 +- onionr/core.py | 33 +-- onionr/coredb/daemonqueue/__init__.py | 34 ++- onionr/coredb/dbfiles.py | 7 +- onionr/coredb/keydb/addkeys.py | 2 +- onionr/coredb/keydb/listkeys.py | 5 +- onionr/dbcreator.py | 258 +++++++++--------- onionr/httpapi/apiutils/shutdown.py | 4 +- onionr/onionr.py | 5 - onionr/onionrcommands/__init__.py | 4 +- onionr/onionrcommands/daemonlaunch.py | 5 +- onionr/onionrcommands/onionrstatistics.py | 6 +- .../__init__.py} | 0 onionr/onionrpluginapi.py | 8 +- onionr/onionrusers/onionrusers.py | 1 + 16 files changed, 175 insertions(+), 203 deletions(-) rename onionr/{onionrcrypto.py => onionrcrypto/__init__.py} (100%) diff --git a/onionr/communicator/__init__.py b/onionr/communicator/__init__.py index ea471bf1..8d01dbfa 100755 --- a/onionr/communicator/__init__.py +++ b/onionr/communicator/__init__.py @@ -30,6 +30,7 @@ from communicatorutils import cooldownpeer, housekeeping, netcheck from onionrutils import localcommand, epoch from etc import humanreadabletime import onionrservices, onionr, onionrproofs +from coredb import daemonqueue OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers config.reload() @@ -84,7 +85,7 @@ class OnionrCommunicatorDaemon: # Clear the daemon queue for any dead messages if os.path.exists(self._core.queueDB): - self._core.clearDaemonQueue() + daemonqueue.clear_daemon_queue() # Loads in and starts the enabled plugins plugins.reload() diff --git a/onionr/communicatorutils/daemonqueuehandler.py b/onionr/communicatorutils/daemonqueuehandler.py index 7c922aa1..a3ac35b2 100755 --- a/onionr/communicatorutils/daemonqueuehandler.py +++ b/onionr/communicatorutils/daemonqueuehandler.py @@ -20,8 +20,9 @@ import logger import onionrevents as 
events from onionrutils import localcommand +from coredb import daemonqueue def handle_daemon_commands(comm_inst): - cmd = comm_inst._core.daemonQueue() + cmd = daemonqueue.daemon_queue() response = '' if cmd is not False: events.event('daemon_command', onionr = comm_inst._core.onionrInst, data = {'cmd' : cmd}) diff --git a/onionr/core.py b/onionr/core.py index 1f83b8f4..79afe9c9 100755 --- a/onionr/core.py +++ b/onionr/core.py @@ -62,7 +62,6 @@ class Core: self.requirements = onionrvalues.OnionrValues() self.torPort = torPort self.dataNonceFile = self.dataDir + 'block-nonces.dat' - self.dbCreate = dbcreator.DBCreator(self) self.forwardKeysFile = self.dataDir + 'forward-keys.db' self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5) self.storage_counter = storagecounter.StorageCounter(self) @@ -81,7 +80,7 @@ class Core: if not os.path.exists(self.blockDB): self.createBlockDB() if not os.path.exists(self.forwardKeysFile): - self.dbCreate.createForwardKeyDB() + dbcreator.createForwardKeyDB() if not os.path.exists(self.peerDB): self.createPeerDB() if not os.path.exists(self.addressDB): @@ -176,32 +175,6 @@ class Core: ''' return onionrstorage.getData(self, hash) - def daemonQueue(self): - ''' - Gives commands to the communication proccess/daemon by reading an sqlite3 database - - This function intended to be used by the client. Queue to exchange data between "client" and server. 
- ''' - return coredb.daemonqueue.daemon_queue(self) - - def daemonQueueAdd(self, command, data='', responseID=''): - ''' - Add a command to the daemon queue, used by the communication daemon (communicator.py) - ''' - return coredb.daemonqueue.daemon_queue_add(self, command, data, responseID) - - def daemonQueueGetResponse(self, responseID=''): - ''' - Get a response sent by communicator to the API, by requesting to the API - ''' - return coredb.daemonqueue.daemon_queue_get_response(self, responseID) - - def clearDaemonQueue(self): - ''' - Clear the daemon queue (somewhat dangerous) - ''' - return coredb.daemonqueue.clear_daemon_queue(self) - def listAdders(self, randomOrder=True, i2p=True, recent=0): ''' Return a list of addresses @@ -390,7 +363,7 @@ class Core: if localcommand.local_command(self, '/ping', maxWait=10) == 'pong!': if self.config.get('general.security_level', 1) == 0: localcommand.local_command(self, '/waitforshare/' + retData, post=True, maxWait=5) - self.daemonQueueAdd('uploadBlock', retData) + coredb.daemonqueue.daemon_queue_add('uploadBlock', retData) else: pass coredb.blockmetadb.add_to_block_DB(retData, selfInsert=True, dataSaved=True) @@ -408,7 +381,7 @@ class Core: Introduces our node into the network by telling X many nodes our HS address ''' if localcommand.local_command(self, '/ping', maxWait=10) == 'pong!': - self.daemonQueueAdd('announceNode') + coredb.daemonqueue.daemon_queue_add('announceNode') logger.info('Introduction command will be processed.', terminal=True) else: logger.warn('No running node detected. Cannot introduce.', terminal=True) \ No newline at end of file diff --git a/onionr/coredb/daemonqueue/__init__.py b/onionr/coredb/daemonqueue/__init__.py index b494a45a..f7ce458f 100644 --- a/onionr/coredb/daemonqueue/__init__.py +++ b/onionr/coredb/daemonqueue/__init__.py @@ -21,8 +21,10 @@ import sqlite3, os import onionrevents as events from onionrutils import localcommand, epoch +from .. 
import dbfiles +import dbcreator -def daemon_queue(core_inst): +def daemon_queue(): ''' Gives commands to the communication proccess/daemon by reading an sqlite3 database @@ -30,28 +32,26 @@ def daemon_queue(core_inst): ''' retData = False - if not os.path.exists(core_inst.queueDB): - core_inst.dbCreate.createDaemonDB() + if not os.path.exists(dbfiles.daemon_queue_db): + dbcreator.createDaemonDB() else: - conn = sqlite3.connect(core_inst.queueDB, timeout=30) + conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=30) c = conn.cursor() try: for row in c.execute('SELECT command, data, date, min(ID), responseID FROM commands group by id'): retData = row break except sqlite3.OperationalError: - core_inst.dbCreate.createDaemonDB() + dbcreator.createDaemonDB() else: if retData != False: c.execute('DELETE FROM commands WHERE id=?;', (retData[3],)) conn.commit() conn.close() - events.event('queue_pop', data = {'data': retData}, onionr = core_inst.onionrInst) - return retData -def daemon_queue_add(core_inst, command, data='', responseID=''): +def daemon_queue_add(command, data='', responseID=''): ''' Add a command to the daemon queue, used by the communication daemon (communicator.py) ''' @@ -59,7 +59,7 @@ def daemon_queue_add(core_inst, command, data='', responseID=''): retData = True date = epoch.get_epoch() - conn = sqlite3.connect(core_inst.queueDB, timeout=30) + conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=30) c = conn.cursor() t = (command, data, date, responseID) try: @@ -67,24 +67,23 @@ def daemon_queue_add(core_inst, command, data='', responseID=''): conn.commit() except sqlite3.OperationalError: retData = False - core_inst.daemonQueue() - events.event('queue_push', data = {'command': command, 'data': data}, onionr = core_inst.onionrInst) + daemon_queue() conn.close() return retData -def daemon_queue_get_response(core_inst, responseID=''): +def daemon_queue_get_response(responseID=''): ''' Get a response sent by communicator to the API, by requesting 
to the API ''' - assert len(responseID) > 0 - resp = localcommand.local_command(core_inst, 'queueResponse/' + responseID) + if len(responseID) == 0: raise ValueError('ResponseID should not be empty') + resp = localcommand.local_command(dbfiles.daemon_queue_db, 'queueResponse/' + responseID) return resp -def clear_daemon_queue(core_inst): +def clear_daemon_queue(): ''' Clear the daemon queue (somewhat dangerous) ''' - conn = sqlite3.connect(core_inst.queueDB, timeout=30) + conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=30) c = conn.cursor() try: @@ -93,5 +92,4 @@ except: pass - conn.close() - events.event('queue_clear', onionr = core_inst.onionrInst) \ No newline at end of file + conn.close() \ No newline at end of file diff --git a/onionr/coredb/dbfiles.py b/onionr/coredb/dbfiles.py index 47d82316..4dda0cd2 100644 --- a/onionr/coredb/dbfiles.py +++ b/onionr/coredb/dbfiles.py @@ -2,4 +2,9 @@ from utils import identifyhome home = identifyhome.identify_home() if not home.endswith('/'): home += '/' -block_meta_db = '%sblock-metadata.db' \ No newline at end of file +block_meta_db = '%sblock-metadata.db' % (home,) +block_data_db = '%sblocks/block-data.db' % (home,) +daemon_queue_db = '%sdaemon-queue.db' % (home,) +address_info_db = '%saddress.db' % (home,) +user_id_info_db = '%susers.db' % (home,) +forward_keys_db = '%sforward-keys.db' % (home,) \ No newline at end of file diff --git a/onionr/coredb/keydb/addkeys.py b/onionr/coredb/keydb/addkeys.py index ca352d17..7e096d5a 100644 --- a/onionr/coredb/keydb/addkeys.py +++ b/onionr/coredb/keydb/addkeys.py @@ -20,7 +20,7 @@ import sqlite3 import onionrevents as events from onionrutils import stringvalidators - +from . 
import listkeys def add_peer(core_inst, peerID, name=''): ''' Adds a public key to the key database (misleading function name) diff --git a/onionr/coredb/keydb/listkeys.py b/onionr/coredb/keydb/listkeys.py index 50aff637..71a4b541 100644 --- a/onionr/coredb/keydb/listkeys.py +++ b/onionr/coredb/keydb/listkeys.py @@ -20,14 +20,15 @@ import sqlite3 import logger from onionrutils import epoch -def list_peers(core_inst, randomOrder=True, getPow=False, trust=0): +from .. import dbfiles +def list_peers(randomOrder=True, getPow=False, trust=0): ''' Return a list of public keys (misleading function name) randomOrder determines if the list should be in a random order trust sets the minimum trust to list ''' - conn = sqlite3.connect(core_inst.peerDB, timeout=30) + conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=30) c = conn.cursor() payload = '' diff --git a/onionr/dbcreator.py b/onionr/dbcreator.py index 6c26fa97..e02b5685 100755 --- a/onionr/dbcreator.py +++ b/onionr/dbcreator.py @@ -17,140 +17,138 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . 
''' +from coredb import dbfiles import sqlite3, os -class DBCreator: - def __init__(self, coreInst): - self.core = coreInst - def createAddressDB(self): - ''' - Generate the address database +def createAddressDB(): + ''' + Generate the address database - types: - 1: I2P b32 address - 2: Tor v2 (like facebookcorewwwi.onion) - 3: Tor v3 - ''' - conn = sqlite3.connect(self.core.addressDB) - c = conn.cursor() - c.execute('''CREATE TABLE adders( - address text, - type int, - knownPeer text, - speed int, - success int, - powValue text, - failure int, - lastConnect int, - lastConnectAttempt int, - trust int, - introduced int - ); - ''') - conn.commit() - conn.close() + types: + 1: I2P b32 address + 2: Tor v2 (like facebookcorewwwi.onion) + 3: Tor v3 + ''' + conn = sqlite3.connect(dbfiles.address_info_db) + c = conn.cursor() + c.execute('''CREATE TABLE adders( + address text, + type int, + knownPeer text, + speed int, + success int, + powValue text, + failure int, + lastConnect int, + lastConnectAttempt int, + trust int, + introduced int + ); + ''') + conn.commit() + conn.close() - def createPeerDB(self): - ''' - Generate the peer sqlite3 database and populate it with the peers table. - ''' - # generate the peer database - conn = sqlite3.connect(self.core.peerDB) - c = conn.cursor() - c.execute('''CREATE TABLE peers( - ID text not null, - name text, - adders text, - dateSeen not null, - trust int, - hashID text); - ''') - c.execute('''CREATE TABLE forwardKeys( - peerKey text not null, - forwardKey text not null, +def createPeerDB(): + ''' + Generate the peer sqlite3 database and populate it with the peers table. 
+ ''' + # generate the peer database + conn = sqlite3.connect(dbfiles.user_id_info_db) + c = conn.cursor() + c.execute('''CREATE TABLE peers( + ID text not null, + name text, + adders text, + dateSeen not null, + trust int, + hashID text); + ''') + c.execute('''CREATE TABLE forwardKeys( + peerKey text not null, + forwardKey text not null, + date int not null, + expire int not null + );''') + conn.commit() + conn.close() + return + +def createBlockDB(): + ''' + Create a database for blocks + + hash - the hash of a block + dateReceived - the date the block was recieved, not necessarily when it was created + decrypted - if we can successfully decrypt the block (does not describe its current state) + dataType - data type of the block + dataFound - if the data has been found for the block + dataSaved - if the data has been saved for the block + sig - optional signature by the author (not optional if author is specified) + author - multi-round partial sha3-256 hash of authors public key + dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is + expire int - block expire date in epoch + ''' + if os.path.exists(dbfiles.block_meta_db): + raise FileExistsError("Block database already exists") + conn = sqlite3.connect(dbfiles.block_meta_db) + c = conn.cursor() + c.execute('''CREATE TABLE hashes( + hash text not null, + dateReceived int, + decrypted int, + dataType text, + dataFound int, + dataSaved int, + sig text, + author text, + dateClaimed int, + expire int + ); + ''') + conn.commit() + conn.close() + return + +def createBlockDataDB(): + if os.path.exists(dbfiles.block_data_db): + raise FileExistsError("Block data database already exists") + conn = sqlite3.connect(dbfiles.block_data_db) + c = conn.cursor() + c.execute('''CREATE TABLE blockData( + hash text not null, + data blob not null + ); + ''') + conn.commit() + conn.close() + +def createForwardKeyDB(): + ''' + Create the forward secrecy key db (*for *OUR* keys*) + ''' + if 
os.path.exists(dbfiles.forward_keys_db): + raise FileExistsError("Block database already exists") + conn = sqlite3.connect(dbfiles.forward_keys_db) + c = conn.cursor() + c.execute('''CREATE TABLE myForwardKeys( + peer text not null, + publickey text not null, + privatekey text not null, date int not null, expire int not null - );''') - conn.commit() - conn.close() - return + ); + ''') + conn.commit() + conn.close() + return - def createBlockDB(self): - ''' - Create a database for blocks - - hash - the hash of a block - dateReceived - the date the block was recieved, not necessarily when it was created - decrypted - if we can successfully decrypt the block (does not describe its current state) - dataType - data type of the block - dataFound - if the data has been found for the block - dataSaved - if the data has been saved for the block - sig - optional signature by the author (not optional if author is specified) - author - multi-round partial sha3-256 hash of authors public key - dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is - expire int - block expire date in epoch - ''' - if os.path.exists(self.core.blockDB): - raise FileExistsError("Block database already exists") - conn = sqlite3.connect(self.core.blockDB) - c = conn.cursor() - c.execute('''CREATE TABLE hashes( - hash text not null, - dateReceived int, - decrypted int, - dataType text, - dataFound int, - dataSaved int, - sig text, - author text, - dateClaimed int, - expire int - ); - ''') - conn.commit() - conn.close() - return - - def createBlockDataDB(self): - if os.path.exists(self.core.blockDataDB): - raise FileExistsError("Block data database already exists") - conn = sqlite3.connect(self.core.blockDataDB) - c = conn.cursor() - c.execute('''CREATE TABLE blockData( - hash text not null, - data blob not null - ); - ''') - conn.commit() - conn.close() - - def createForwardKeyDB(self): - ''' - Create the forward secrecy key db (*for *OUR* keys*) - ''' - if 
os.path.exists(self.core.forwardKeysFile): - raise FileExistsError("Block database already exists") - conn = sqlite3.connect(self.core.forwardKeysFile) - c = conn.cursor() - c.execute('''CREATE TABLE myForwardKeys( - peer text not null, - publickey text not null, - privatekey text not null, - date int not null, - expire int not null - ); - ''') - conn.commit() - conn.close() - return - - def createDaemonDB(self): - ''' - Create the daemon queue database - ''' - conn = sqlite3.connect(self.core.queueDB, timeout=10) - c = conn.cursor() - # Create table - c.execute('''CREATE TABLE commands (id integer primary key autoincrement, command text, data text, date text, responseID text)''') - conn.commit() - conn.close() \ No newline at end of file +def createDaemonDB(): + ''' + Create the daemon queue database + ''' + conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=10) + c = conn.cursor() + # Create table + c.execute('''CREATE TABLE commands (id integer primary key autoincrement, command text, data text, date text, responseID text)''') + conn.commit() + conn.close() \ No newline at end of file diff --git a/onionr/httpapi/apiutils/shutdown.py b/onionr/httpapi/apiutils/shutdown.py index 90cd33cb..53072eea 100644 --- a/onionr/httpapi/apiutils/shutdown.py +++ b/onionr/httpapi/apiutils/shutdown.py @@ -20,7 +20,7 @@ from flask import Blueprint, Response import core, onionrblockapi, onionrexceptions from onionrutils import stringvalidators - +from coredb import daemonqueue shutdown_bp = Blueprint('shutdown', __name__) def shutdown(client_api_inst): @@ -34,5 +34,5 @@ def shutdown(client_api_inst): @shutdown_bp.route('/shutdownclean') def shutdown_clean(): # good for calling from other clients - core.Core().daemonQueueAdd('shutdown') + daemonqueue.daemon_queue_add('shutdown') return Response("bye") \ No newline at end of file diff --git a/onionr/onionr.py b/onionr/onionr.py index f9b361ab..c255b50b 100755 --- a/onionr/onionr.py +++ b/onionr/onionr.py @@ -300,11 +300,6 @@ 
class Onionr: if verbosity >= 2: function('Running on %s %s' % (platform.platform(), platform.release()), terminal=True) - def doPEX(self): - '''make communicator do pex''' - logger.info('Sending pex to command queue...') - self.onionrCore.daemonQueueAdd('pex') - def listKeys(self): ''' Displays a list of keys (used to be called peers) (?) diff --git a/onionr/onionrcommands/__init__.py b/onionr/onionrcommands/__init__.py index 59c9d6bc..39a272f2 100755 --- a/onionr/onionrcommands/__init__.py +++ b/onionr/onionrcommands/__init__.py @@ -21,7 +21,7 @@ import webbrowser, sys import logger from . import pubkeymanager, onionrstatistics, daemonlaunch, filecommands, plugincommands, keyadders -from . import banblocks, exportblocks, openwebinterface, resettor +from . import banblocks, exportblocks, openwebinterface, resettor, dopex from onionrutils import importnewblocks def show_help(o_inst, command): @@ -115,7 +115,7 @@ def get_commands(onionr_inst): 'importblocks': importnewblocks.import_new_blocks, 'introduce': onionr_inst.onionrCore.introduceNode, - 'pex': onionr_inst.doPEX, + 'pex': dopex.do_PEX, 'getpassword': onionr_inst.printWebPassword, 'get-password': onionr_inst.printWebPassword, diff --git a/onionr/onionrcommands/daemonlaunch.py b/onionr/onionrcommands/daemonlaunch.py index d24b2dff..d2c81ea5 100755 --- a/onionr/onionrcommands/daemonlaunch.py +++ b/onionr/onionrcommands/daemonlaunch.py @@ -24,6 +24,7 @@ import onionr, apiservers, logger, communicator import onionrevents as events from netcontroller import NetController from onionrutils import localcommand +from coredb import daemonqueue def _proper_shutdown(o_inst): localcommand.local_command(o_inst.onionrCore, 'shutdown') @@ -103,7 +104,7 @@ def daemon(o_inst): break # Break out if sigterm for clean exit signal.signal(signal.SIGINT, _ignore_sigint) - o_inst.onionrCore.daemonQueueAdd('shutdown') + daemonqueue.daemon_queue_add('shutdown') localcommand.local_command(o_inst.onionrCore, 'shutdown') net.killTor() @@ 
-124,7 +125,7 @@ def kill_daemon(o_inst): events.event('daemon_stop', onionr = o_inst) net = NetController(o_inst.onionrCore.config.get('client.port', 59496)) try: - o_inst.onionrCore.daemonQueueAdd('shutdown') + daemonqueue.daemon_queue_add('shutdown') except sqlite3.OperationalError: pass diff --git a/onionr/onionrcommands/onionrstatistics.py b/onionr/onionrcommands/onionrstatistics.py index fd54516b..e17d23e3 100755 --- a/onionr/onionrcommands/onionrstatistics.py +++ b/onionr/onionrcommands/onionrstatistics.py @@ -23,7 +23,7 @@ from onionrblockapi import Block import onionr from onionrutils import checkcommunicator, mnemonickeys from utils import sizeutils -from coredb import blockmetadb +from coredb import blockmetadb, daemonqueue def show_stats(o_inst): try: # define stats messages here @@ -96,11 +96,11 @@ def show_details(o_inst): def show_peers(o_inst): randID = str(uuid.uuid4()) - o_inst.onionrCore.daemonQueueAdd('connectedPeers', responseID=randID) + daemonqueue.daemon_queue_add('connectedPeers', responseID=randID) while True: try: time.sleep(3) - peers = o_inst.onionrCore.daemonQueueGetResponse(randID) + peers = daemonqueue.daemon_queue_get_response(randID) except KeyboardInterrupt: break if not type(peers) is None: diff --git a/onionr/onionrcrypto.py b/onionr/onionrcrypto/__init__.py similarity index 100% rename from onionr/onionrcrypto.py rename to onionr/onionrcrypto/__init__.py diff --git a/onionr/onionrpluginapi.py b/onionr/onionrpluginapi.py index 9b9842c3..31bf99fa 100755 --- a/onionr/onionrpluginapi.py +++ b/onionr/onionrpluginapi.py @@ -20,7 +20,7 @@ import onionrplugins, core as onionrcore, logger from onionrutils import localcommand - +from coredb import daemonqueue class DaemonAPI: def __init__(self, pluginapi): self.pluginapi = pluginapi @@ -36,15 +36,13 @@ class DaemonAPI: return def queue(self, command, data = ''): - self.pluginapi.get_core().daemonQueueAdd(command, data) - - return + return daemonqueue.daemon_queue_add(command, data) def 
local_command(self, command): return localcommand.local_command(self.pluginapi.get_core(), command) def queue_pop(self): - return self.get_core().daemonQueue() + return daemonqueue.daemon_queue() class PluginAPI: def __init__(self, pluginapi): diff --git a/onionr/onionrusers/onionrusers.py b/onionr/onionrusers/onionrusers.py index 28a5784d..9ba61aa1 100755 --- a/onionr/onionrusers/onionrusers.py +++ b/onionr/onionrusers/onionrusers.py @@ -21,6 +21,7 @@ import logger, onionrexceptions, json, sqlite3, time from onionrutils import stringvalidators, bytesconverter, epoch import unpaddedbase32 import nacl.exceptions +from coredb import keydb def deleteExpiredKeys(coreInst): # Fetch the keys we generated for the peer, that are still around