diff --git a/onionr/blockimporter.py b/onionr/blockimporter.py
index ffd9ba63..d64d0e13 100755
--- a/onionr/blockimporter.py
+++ b/onionr/blockimporter.py
@@ -17,20 +17,20 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
-import core, onionrexceptions, logger
+import onionrexceptions, logger
from onionrutils import validatemetadata, blockmetadata
from coredb import blockmetadb
-def importBlockFromData(content, coreInst):
+import onionrcrypto, onionrblacklist, onionrstorage
+def importBlockFromData(content):
+ crypto = onionrcrypto.OnionrCrypto()
+ blacklist = onionrblacklist.OnionrBlackList()
retData = False
- dataHash = coreInst._crypto.sha3Hash(content)
+ dataHash = crypto.sha3Hash(content)
- if coreInst._blacklist.inBlacklist(dataHash):
+ if blacklist.inBlacklist(dataHash):
raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,))
- if not isinstance(coreInst, core.Core):
- raise Exception("coreInst must be an Onionr core instance")
-
try:
content = content.encode()
except AttributeError:
@@ -38,15 +38,15 @@ def importBlockFromData(content, coreInst):
metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
- if validatemetadata.validate_metadata(coreInst, metadata, metas[2]): # check if metadata is valid
- if coreInst._crypto.verifyPow(content): # check if POW is enough/correct
+ if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid
+ if crypto.verifyPow(content): # check if POW is enough/correct
logger.info('Block passed proof, saving.', terminal=True)
try:
- blockHash = coreInst.setData(content)
+ blockHash = onionrstorage.setdata(content)
except onionrexceptions.DiskAllocationReached:
pass
else:
blockmetadb.add_to_block_DB(blockHash, dataSaved=True)
- blockmetadata.process_block_metadata(coreInst, blockHash) # caches block metadata values to block database
+ blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
retData = True
return retData
\ No newline at end of file
diff --git a/onionr/coredb/dbfiles.py b/onionr/coredb/dbfiles.py
index 4dda0cd2..b920d1d6 100644
--- a/onionr/coredb/dbfiles.py
+++ b/onionr/coredb/dbfiles.py
@@ -7,4 +7,5 @@ block_data_db = '%sblocks/block-data.db' % (home,)
daemon_queue_db = '%sdaemon-queue.db' % (home,)
address_info_db = '%saddress.db' % (home,)
user_id_info_db = '%susers.db' % (home,)
-forward_keys_db = '%sforward-keys.db' % (home,)
\ No newline at end of file
+forward_keys_db = '%sforward-keys.db' % (home,)
+blacklist_db = '%sblacklist.db' % (home,)
\ No newline at end of file
diff --git a/onionr/keymanager.py b/onionr/keymanager.py
index a288d87e..4c6e628f 100755
--- a/onionr/keymanager.py
+++ b/onionr/keymanager.py
@@ -18,17 +18,15 @@
along with this program. If not, see .
'''
from onionrutils import bytesconverter
-import onionrcrypto
+from onionrcrypto import generate
+import filepaths
class KeyManager:
- def __init__(self, crypto):
- assert isinstance(crypto, onionrcrypto.OnionrCrypto)
- self._core = crypto._core
- self.keyFile = crypto._keyFile
- self.crypto = crypto
+ def __init__(self):
+ self.keyFile = filepaths.keys_file
def addKey(self, pubKey=None, privKey=None):
if type(pubKey) is type(None) and type(privKey) is type(None):
- pubKey, privKey = self.crypto.generatePubKey()
+ pubKey, privKey = generate.generate_pub_key()
pubKey = bytesconverter.bytes_to_str(pubKey)
privKey = bytesconverter.bytes_to_str(privKey)
try:
@@ -70,11 +68,4 @@ class KeyManager:
for pair in keyData.split('\n'):
if pubKey in pair:
privKey = pair.split(',')[1]
- return privKey
-
- def changeActiveKey(self, pubKey):
- '''Change crypto.pubKey and crypto.privKey to a given key pair by specifying the public key'''
- if not pubKey in self.getPubkeyList():
- raise ValueError('That pubkey does not exist')
- self.crypto.pubKey = pubKey
- self.crypto.privKey = self.getPrivkey(pubKey)
\ No newline at end of file
+ return privKey
\ No newline at end of file
diff --git a/onionr/onionr.py b/onionr/onionr.py
index c255b50b..354f7ed3 100755
--- a/onionr/onionr.py
+++ b/onionr/onionr.py
@@ -34,16 +34,17 @@ from utils import detectoptimization
if detectoptimization.detect_optimization():
sys.stderr.write('Error, Onionr cannot be run in optimized mode\n')
sys.exit(1)
-
import os, base64, random, shutil, time, platform, signal
from threading import Thread
-import core, config, logger, onionrplugins as plugins, onionrevents as events
+import config, logger, onionrplugins as plugins, onionrevents as events
import netcontroller
from netcontroller import NetController
from onionrblockapi import Block
import onionrproofs, onionrexceptions, communicator, setupconfig
import onionrcommands as commands # Many command definitions are here
from utils import identifyhome
+from coredb import keydb
+import filepaths
try:
from urllib3.contrib.socks import SOCKSProxyManager
@@ -103,8 +104,6 @@ class Onionr:
plugins.disable(name, onionr = self, stop_event = False)
self.communicatorInst = None
- self.onionrCore = core.Core()
- self.onionrCore.onionrInst = self
#self.deleteRunFiles()
self.clientAPIInst = '' # Client http api instance
@@ -168,11 +167,11 @@ class Onionr:
def deleteRunFiles(self):
try:
- os.remove(self.onionrCore.publicApiHostFile)
+ os.remove(filepaths.public_API_host_file)
except FileNotFoundError:
pass
try:
- os.remove(self.onionrCore.privateApiHostFile)
+ os.remove(filepaths.private_API_host_file)
except FileNotFoundError:
pass
@@ -236,7 +235,7 @@ class Onionr:
def listPeers(self):
logger.info('Peer transport address list:')
- for i in self.onionrCore.listAdders():
+ for i in keydb.listkeys.list_adders():
logger.info(i, terminal=True)
def getWebPassword(self):
@@ -304,7 +303,7 @@ class Onionr:
'''
Displays a list of keys (used to be called peers) (?)
'''
- logger.info('%sPublic keys in database: \n%s%s' % (logger.colors.fg.lightgreen, logger.colors.fg.green, '\n'.join(self.onionrCore.listPeers())), terminal=True)
+        logger.info('%sPublic keys in database: \n%s%s' % (logger.colors.fg.lightgreen, logger.colors.fg.green, '\n'.join(keydb.listkeys.list_peers())), terminal=True)
def addPeer(self):
'''
diff --git a/onionr/onionrblacklist.py b/onionr/onionrblacklist.py
index 03da61b4..3542a1f5 100755
--- a/onionr/onionrblacklist.py
+++ b/onionr/onionrblacklist.py
@@ -17,19 +17,21 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
-import sqlite3, os, logger
+import sqlite3, os
+import logger, onionrcrypto
from onionrutils import epoch, bytesconverter
+from coredb import dbfiles
+crypto = onionrcrypto.OnionrCrypto()
class OnionrBlackList:
- def __init__(self, coreInst):
- self.blacklistDB = coreInst.dataDir + 'blacklist.db'
- self._core = coreInst
+ def __init__(self):
+ self.blacklistDB = dbfiles.blacklist_db
- if not os.path.exists(self.blacklistDB):
+ if not os.path.exists(dbfiles.blacklist_db):
self.generateDB()
return
def inBlacklist(self, data):
- hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
+ hashed = bytesconverter.bytes_to_str(crypto.sha3Hash(data))
retData = False
if not hashed.isalnum():
@@ -99,7 +101,7 @@ class OnionrBlackList:
2=pubkey
'''
# we hash the data so we can remove data entirely from our node's disk
- hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
+ hashed = bytesconverter.bytes_to_str(crypto.sha3Hash(data))
if len(hashed) > 64:
raise Exception("Hashed data is too large")
diff --git a/onionr/onionrblockapi.py b/onionr/onionrblockapi.py
index 9fd6c005..e85e4330 100755
--- a/onionr/onionrblockapi.py
+++ b/onionr/onionrblockapi.py
@@ -18,23 +18,25 @@
along with this program. If not, see .
'''
-import core as onionrcore, logger, config, onionrexceptions, nacl.exceptions
-import json, os, sys, datetime, base64, onionrstorage
+import logger, config, onionrexceptions, nacl.exceptions
+import json, os, sys, datetime, base64, onionrstorage, onionrcrypto
from onionrusers import onionrusers
from onionrutils import stringvalidators, epoch
from coredb import blockmetadb
+from onionrstorage import removeblock
+import onionrblocks
class Block:
+ crypto = onionrcrypto.OnionrCrypto()
blockCacheOrder = list() # NEVER write your own code that writes to this!
blockCache = dict() # should never be accessed directly, look at Block.getCache()
- def __init__(self, hash = None, core = None, type = None, content = None, expire=None, decrypt=False, bypassReplayCheck=False):
+ def __init__(self, hash = None, type = None, content = None, expire=None, decrypt=False, bypassReplayCheck=False):
# take from arguments
# sometimes people input a bytes object instead of str in `hash`
if (not hash is None) and isinstance(hash, bytes):
hash = hash.decode()
self.hash = hash
- self.core = core
self.btype = type
self.bcontent = content
self.expire = expire
@@ -56,10 +58,6 @@ class Block:
self.validSig = False
self.autoDecrypt = decrypt
- # handle arguments
- if self.getCore() is None:
- self.core = onionrcore.Core()
-
self.update()
def decrypt(self, encodedData = True):
@@ -70,27 +68,26 @@ class Block:
if self.decrypted:
return True
retData = False
- core = self.getCore()
# decrypt data
if self.getHeader('encryptType') == 'asym':
try:
- self.bcontent = core._crypto.pubKeyDecrypt(self.bcontent, encodedData=encodedData)
- bmeta = core._crypto.pubKeyDecrypt(self.bmetadata, encodedData=encodedData)
+                self.bcontent = self.crypto.pubKeyDecrypt(self.bcontent, encodedData=encodedData)
+                bmeta = self.crypto.pubKeyDecrypt(self.bmetadata, encodedData=encodedData)
                 try:
                     bmeta = bmeta.decode()
                 except AttributeError:
                     # yet another bytes fix
                     pass
                 self.bmetadata = json.loads(bmeta)
-                self.signature = core._crypto.pubKeyDecrypt(self.signature, encodedData=encodedData)
-                self.signer = core._crypto.pubKeyDecrypt(self.signer, encodedData=encodedData)
+                self.signature = self.crypto.pubKeyDecrypt(self.signature, encodedData=encodedData)
+                self.signer = self.crypto.pubKeyDecrypt(self.signer, encodedData=encodedData)
self.bheader['signer'] = self.signer.decode()
self.signedData = json.dumps(self.bmetadata) + self.bcontent.decode()
# Check for replay attacks
try:
if epoch.get_epoch() - blockmetadb.get_block_date(self.hash) > 60:
- assert self.core._crypto.replayTimestampValidation(self.bmetadata['rply'])
+ assert self.crypto.replayTimestampValidation(self.bmetadata['rply'])
except (AssertionError, KeyError, TypeError) as e:
if not self.bypassReplayCheck:
# Zero out variables to prevent reading of replays
@@ -106,7 +103,7 @@ class Block:
pass
else:
try:
- self.bcontent = onionrusers.OnionrUser(self.getCore(), self.signer).forwardDecrypt(self.bcontent)
+ self.bcontent = onionrusers.OnionrUser(self.signer).forwardDecrypt(self.bcontent)
except (onionrexceptions.DecryptionError, nacl.exceptions.CryptoError) as e:
logger.error(str(e))
pass
@@ -127,9 +124,7 @@ class Block:
Verify if a block's signature is signed by its claimed signer
'''
- core = self.getCore()
-
- if core._crypto.edVerify(data=self.signedData, key=self.signer, sig=self.signature, encodedData=True):
+        if self.crypto.edVerify(data=self.signedData, key=self.signer, sig=self.signature, encodedData=True):
self.validSig = True
else:
self.validSig = False
@@ -158,7 +153,7 @@ class Block:
# import from file
if blockdata is None:
try:
- blockdata = onionrstorage.getData(self.core, self.getHash()).decode()
+ blockdata = onionrstorage.getData(self.getHash()).decode()
except AttributeError:
raise onionrexceptions.NoDataAvailable('Block does not exist')
else:
@@ -220,7 +215,8 @@ class Block:
os.remove(self.getBlockFile())
except TypeError:
pass
- self.getCore().removeBlock(self.getHash())
+
+ removeblock.remove_block(self.getHash())
return True
return False
@@ -238,14 +234,8 @@ class Block:
try:
if self.isValid() is True:
- '''
- if (not self.getBlockFile() is None) and (recreate is True):
- onionrstorage.store(self.core, self.getRaw().encode())
- #with open(self.getBlockFile(), 'wb') as blockFile:
- # blockFile.write(self.getRaw().encode())
- else:
- '''
- self.hash = self.getCore().insertBlock(self.getRaw(), header = self.getType(), sign = sign, meta = self.getMetadata(), expire = self.getExpire())
+
+ self.hash = onionrblocks.insert(self.getRaw(), header = self.getType(), sign = sign, meta = self.getMetadata(), expire = self.getExpire())
if self.hash != False:
self.update()
@@ -278,16 +268,6 @@ class Block:
return self.hash
- def getCore(self):
- '''
- Returns the Core instance being used by the Block
-
- Outputs:
- - (Core): the Core instance
- '''
-
- return self.core
-
def getType(self):
'''
Returns the type of the block
@@ -363,7 +343,7 @@ class Block:
if self.parent == self.getHash():
self.parent = self
elif Block.exists(self.parent):
- self.parent = Block(self.getMetadata('parent'), core = self.getCore())
+ self.parent = Block(self.getMetadata('parent'))
else:
self.parent = None
@@ -445,7 +425,7 @@ class Block:
if (not self.isSigned()) or (not stringvalidators.validate_pub_key(signer)):
return False
- return bool(self.getCore()._crypto.edVerify(self.getSignedData(), signer, self.getSignature(), encodedData = encodedData))
+            return bool(self.crypto.edVerify(self.getSignedData(), signer, self.getSignature(), encodedData = encodedData))
except:
return False
@@ -511,7 +491,7 @@ class Block:
'''
if type(parent) == str:
- parent = Block(parent, core = self.getCore())
+ parent = Block(parent)
self.parent = parent
self.setMetadata('parent', (None if parent is None else self.getParent().getHash()))
@@ -519,7 +499,7 @@ class Block:
# static functions
- def getBlocks(type = None, signer = None, signed = None, parent = None, reverse = False, limit = None, core = None):
+ def getBlocks(type = None, signer = None, signed = None, parent = None, reverse = False, limit = None):
'''
Returns a list of Block objects based on supplied filters
@@ -528,24 +508,22 @@ class Block:
- signer (str/list): filters by signer (one in the list has to be a signer)
- signed (bool): filters out by whether or not the block is signed
- reverse (bool): reverses the list if True
- - core (Core): lets you optionally supply a core instance so one doesn't need to be started
Outputs:
- (list): a list of Block objects that match the input
'''
try:
- core = (core if not core is None else onionrcore.Core())
if (not parent is None) and (not isinstance(parent, Block)):
- parent = Block(hash = parent, core = core)
+ parent = Block(hash = parent)
relevant_blocks = list()
blocks = (blockmetadb.get_block_list() if type is None else blockmetadb.get_blocks_by_type(type))
for block in blocks:
if Block.exists(block):
- block = Block(block, core = core)
+ block = Block(block)
relevant = True
@@ -586,7 +564,7 @@ class Block:
return list()
- def mergeChain(child, file = None, maximumFollows = 1000, core = None):
+ def mergeChain(child, file = None, maximumFollows = 1000):
'''
Follows a child Block to its root parent Block, merging content
@@ -596,8 +574,6 @@ class Block:
- maximumFollows (int): the maximum number of Blocks to follow
'''
- # validate data and instantiate Core
- core = (core if not core is None else onionrcore.Core())
maximumFollows = max(0, maximumFollows)
# type conversions
@@ -617,7 +593,7 @@ class Block:
if len(blocks) - 1 >= maximumFollows:
break
- block = Block(blocks[-1], core = core).getParent()
+ block = Block(blocks[-1]).getParent()
# end if there is no parent Block
if block is None:
@@ -637,7 +613,7 @@ class Block:
# combine block contents
for hash in blocks:
- block = Block(hash, core = core)
+ block = Block(hash)
contents = block.getContent()
contents = base64.b64decode(contents.encode())
@@ -669,16 +645,11 @@ class Block:
# no input data? scrap it.
if bHash is None:
return False
- '''
- if type(hash) == Block:
- blockfile = hash.getBlockFile()
- else:
- blockfile = onionrcore.Core().dataDir + 'blocks/%s.dat' % hash
- '''
+
if isinstance(bHash, Block):
bHash = bHash.getHash()
- ret = isinstance(onionrstorage.getData(onionrcore.Core(), bHash), type(None))
+ ret = isinstance(onionrstorage.getData(bHash), type(None))
return not ret
diff --git a/onionr/onionrblocks/insert.py b/onionr/onionrblocks/insert.py
index 31558048..b4727511 100644
--- a/onionr/onionrblocks/insert.py
+++ b/onionr/onionrblocks/insert.py
@@ -1,17 +1,17 @@
import json
from onionrutils import bytesconverter, epoch
-import storagecounter, filepaths, onionrvalues, onionrstorage
+import storagecounter, filepaths, onionrstorage
import onionrevents as events
-from etc import powchoice
-crypto = onionrcrypto.OnionrCrypto()
-use_subprocess = powchoice.use_subprocess()
-def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = {}, expire=None, disableForward=False):
+from etc import powchoice, onionrvalues
+def insert_block(onionr_inst, data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = {}, expire=None, disableForward=False):
'''
Inserts a block into the network
encryptType must be specified to encrypt a block
'''
+ use_subprocess = powchoice.use_subprocess(onionr_inst.config)
requirements = onionrvalues.OnionrValues()
storage_counter = storagecounter.StorageCounter()
+ crypto = onionr_inst.crypto
allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
if storage_counter.isFull():
logger.error(allocationReachedMessage)
@@ -23,7 +23,7 @@ def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asym
createTime = epoch.get_epoch()
- dataNonce = bytesconverter.bytes_to_str(crypto.sha3Hash(data))
+    dataNonce = bytesconverter.bytes_to_str(crypto.sha3Hash(data))
try:
with open(filepaths.data_nonce_file, 'r') as nonces:
if dataNonce in nonces:
diff --git a/onionr/onionrcrypto/__init__.py b/onionr/onionrcrypto/__init__.py
index f6252863..b1fa4af5 100755
--- a/onionr/onionrcrypto/__init__.py
+++ b/onionr/onionrcrypto/__init__.py
@@ -25,6 +25,7 @@ from onionrutils import stringvalidators, epoch, bytesconverter
import filepaths
import onionrexceptions, keymanager, onionrutils
import config
+from . import generate, hashers
config.reload()
class OnionrCrypto:
@@ -175,9 +176,7 @@ class OnionrCrypto:
def generatePubKey(self):
'''Generate a Ed25519 public key pair, return tuple of base32encoded pubkey, privkey'''
- private_key = nacl.signing.SigningKey.generate()
- public_key = private_key.verify_key.encode(encoder=nacl.encoding.Base32Encoder())
- return (public_key.decode(), private_key.encode(encoder=nacl.encoding.Base32Encoder()).decode())
+ return generate.generate_pub_key()
def generateDeterministic(self, passphrase, bypassCheck=False):
'''Generate a Ed25519 public key pair from a password'''
@@ -215,20 +214,10 @@ class OnionrCrypto:
return result
def sha3Hash(self, data):
- try:
- data = data.encode()
- except AttributeError:
- pass
- hasher = hashlib.sha3_256()
- hasher.update(data)
- return hasher.hexdigest()
+ return hashers.sha3_hash(data)
def blake2bHash(self, data):
- try:
- data = data.encode()
- except AttributeError:
- pass
- return nacl.hash.blake2b(data)
+ return hashers.blake2b_hash(data)
def verifyPow(self, blockContent):
'''
diff --git a/onionr/onionrcrypto/generate.py b/onionr/onionrcrypto/generate.py
new file mode 100644
index 00000000..a92271de
--- /dev/null
+++ b/onionr/onionrcrypto/generate.py
@@ -0,0 +1,6 @@
+import nacl.signing, nacl.encoding
+def generate_pub_key():
+ '''Generate a Ed25519 public key pair, return tuple of base32encoded pubkey, privkey'''
+ private_key = nacl.signing.SigningKey.generate()
+ public_key = private_key.verify_key.encode(encoder=nacl.encoding.Base32Encoder())
+ return (public_key.decode(), private_key.encode(encoder=nacl.encoding.Base32Encoder()).decode())
\ No newline at end of file
diff --git a/onionr/onionrcrypto/hashers.py b/onionr/onionrcrypto/hashers.py
new file mode 100644
index 00000000..007d923d
--- /dev/null
+++ b/onionr/onionrcrypto/hashers.py
@@ -0,0 +1,16 @@
+import hashlib, nacl.hash
+def sha3_hash(data):
+ try:
+ data = data.encode()
+ except AttributeError:
+ pass
+ hasher = hashlib.sha3_256()
+ hasher.update(data)
+ return hasher.hexdigest()
+
+def blake2b_hash(data):
+ try:
+ data = data.encode()
+ except AttributeError:
+ pass
+ return nacl.hash.blake2b(data)
\ No newline at end of file
diff --git a/onionr/onionrpeers/peercleanup.py b/onionr/onionrpeers/peercleanup.py
index 4446aa3c..ca3ba002 100644
--- a/onionr/onionrpeers/peercleanup.py
+++ b/onionr/onionrpeers/peercleanup.py
@@ -21,12 +21,13 @@ import sqlite3
import logger
from onionrutils import epoch
from . import scoresortedpeerlist, peerprofiles
-def peer_cleanup(core_inst):
+import onionrblacklist
+from coredb import keydb
+def peer_cleanup(onionr_inst):
'''Removes peers who have been offline too long or score too low'''
- config = core_inst.config
logger.info('Cleaning peers...')
-
- adders = scoresortedpeerlist.get_score_sorted_peer_list(core_inst)
+ blacklist = onionrblacklist.OnionrBlackList()
+ adders = scoresortedpeerlist.get_score_sorted_peer_list()
adders.reverse()
if len(adders) > 1:
@@ -36,14 +37,14 @@ def peer_cleanup(core_inst):
for address in adders:
# Remove peers that go below the negative score
- if peerprofiles.PeerProfiles(address, core_inst).score < min_score:
- core_inst.removeAddress(address)
+ if peerprofiles.PeerProfiles(address).score < min_score:
+ keydb.removekeys.remove_address(address)
try:
- if (int(epoch.get_epoch()) - int(core_inst.getPeerInfo(address, 'dateSeen'))) >= 600:
+ if (int(epoch.get_epoch()) - int(keydb.transportinfo.get_address_info(address, 'dateSeen'))) >= 600:
expireTime = 600
else:
expireTime = 86400
- core_inst._blacklist.addToDB(address, dataType=1, expire=expireTime)
+ blacklist.addToDB(address, dataType=1, expire=expireTime)
except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue
pass
except ValueError:
@@ -51,4 +52,4 @@ def peer_cleanup(core_inst):
logger.warn('Removed address ' + address + '.')
# Unban probably not malicious peers TODO improve
- core_inst._blacklist.deleteExpired(dataType=1)
\ No newline at end of file
+ blacklist.deleteExpired(dataType=1)
\ No newline at end of file
diff --git a/onionr/onionrpeers/peerprofiles.py b/onionr/onionrpeers/peerprofiles.py
index 791e8c45..07b1115a 100644
--- a/onionr/onionrpeers/peerprofiles.py
+++ b/onionr/onionrpeers/peerprofiles.py
@@ -21,7 +21,7 @@ class PeerProfiles:
'''
PeerProfiles
'''
- def __init__(self, address, coreInst):
+ def __init__(self, address):
self.address = address # node address
self.score = None
self.friendSigCount = 0
@@ -29,8 +29,6 @@ class PeerProfiles:
self.failure = 0
self.connectTime = None
- self.coreInst = coreInst
-
self.loadScore()
self.getConnectTime()
return
diff --git a/onionr/onionrpeers/scoresortedpeerlist.py b/onionr/onionrpeers/scoresortedpeerlist.py
index 9c30e586..4f3a5061 100644
--- a/onionr/onionrpeers/scoresortedpeerlist.py
+++ b/onionr/onionrpeers/scoresortedpeerlist.py
@@ -18,14 +18,15 @@
along with this program. If not, see .
'''
from . import peerprofiles
-def get_score_sorted_peer_list(coreInst):
- peer_list = coreInst.listAdders()
+from coredb import keydb
+def get_score_sorted_peer_list():
+ peer_list = keydb.listkeys.list_adders()
peer_scores = {}
peer_times = {}
for address in peer_list:
# Load peer's profiles into a list
- profile = peerprofiles.PeerProfiles(address, coreInst)
+ profile = peerprofiles.PeerProfiles(address)
peer_scores[address] = profile.score
if not isinstance(profile.connectTime, type(None)):
peer_times[address] = profile.connectTime
diff --git a/onionr/onionrpluginapi.py b/onionr/onionrpluginapi.py
index 31bf99fa..1208b267 100755
--- a/onionr/onionrpluginapi.py
+++ b/onionr/onionrpluginapi.py
@@ -18,7 +18,7 @@
along with this program. If not, see .
'''
-import onionrplugins, core as onionrcore, logger
+import onionrplugins, logger, onionrcrypto
from onionrutils import localcommand
from coredb import daemonqueue
class DaemonAPI:
@@ -39,7 +39,7 @@ class DaemonAPI:
return daemonqueue.daemon_queue_add(command, data)
def local_command(self, command):
- return localcommand.local_command(self.pluginapi.get_core(), command)
+ return localcommand.local_command(command)
def queue_pop(self):
return daemonqueue.daemon_queue()
@@ -149,15 +149,12 @@ class pluginapi:
def __init__(self, onionr, data):
self.onionr = onionr
self.data = data
- if self.onionr is None:
- self.core = onionrcore.Core()
- else:
- self.core = self.onionr.onionrCore
self.daemon = DaemonAPI(self)
self.plugins = PluginAPI(self)
self.commands = CommandAPI(self)
self.web = WebAPI(self)
+ self.crypto = onionrcrypto.OnionrCrypto()
def get_onionr(self):
return self.onionr
@@ -165,11 +162,8 @@ class pluginapi:
def get_data(self):
return self.data
- def get_core(self):
- return self.core
-
def get_crypto(self):
- return self.get_core()._crypto
+ return self.crypto
def get_daemonapi(self):
return self.daemon
diff --git a/onionr/onionrproofs.py b/onionr/onionrproofs.py
index 51f35325..bc555077 100755
--- a/onionr/onionrproofs.py
+++ b/onionr/onionrproofs.py
@@ -18,19 +18,17 @@
 along with this program. If not, see .
 '''
 import multiprocessing, nacl.encoding, nacl.hash, nacl.utils, time, math, threading, binascii, sys, json
-import core, config, logger, onionrblockapi
+import config, logger, onionrblockapi, storagecounter, onionrcrypto
 from onionrutils import bytesconverter
 config.reload()
-
-def getDifficultyModifier(coreOrUtilsInst=None):
-    '''Accepts a core or utils instance returns
-    the difficulty modifier for block storage based
+crypto = onionrcrypto.OnionrCrypto()
+def getDifficultyModifier():
+    '''returns the difficulty modifier for block storage based
     on a variety of factors, currently only disk use.
     '''
-    classInst = coreOrUtilsInst
     retData = 0
-    useFunc = classInst.storage_counter.getPercent
+    useFunc = storagecounter.StorageCounter().getPercent
 
     percentUse = useFunc()
@@ -43,7 +42,7 @@ def getDifficultyModifier(coreOrUtilsInst=None):
return retData
-def getDifficultyForNewBlock(data, ourBlock=True, coreInst=None):
+def getDifficultyForNewBlock(data, ourBlock=True):
'''
Get difficulty for block. Accepts size in integer, Block instance, or str/bytes full block contents
'''
@@ -59,7 +58,7 @@ def getDifficultyForNewBlock(data, ourBlock=True, coreInst=None):
else:
minDifficulty = config.get('general.minimum_block_pow', 4)
- retData = max(minDifficulty, math.floor(dataSize / 100000)) + getDifficultyModifier(coreInst)
+ retData = max(minDifficulty, math.floor(dataSize / 100000)) + getDifficultyModifier()
return retData
@@ -118,12 +117,11 @@ class DataPOW:
self.mainHash = '0' * 70
self.puzzle = self.mainHash[0:min(self.difficulty, len(self.mainHash))]
- myCore = core.Core()
for i in range(max(1, threadCount)):
- t = threading.Thread(name = 'thread%s' % i, target = self.pow, args = (True,myCore))
+            t = threading.Thread(name = 'thread%s' % i, target = self.pow, args = (True,))
t.start()
- def pow(self, reporting = False, myCore = None):
+ def pow(self, reporting = False):
startTime = math.floor(time.time())
self.hashing = True
self.reporting = reporting
@@ -187,20 +185,13 @@ class DataPOW:
return result
class POW:
- def __init__(self, metadata, data, threadCount = 1, forceDifficulty=0, coreInst=None):
+ def __init__(self, metadata, data, threadCount = 1, forceDifficulty=0):
self.foundHash = False
self.difficulty = 0
self.data = data
self.metadata = metadata
self.threadCount = threadCount
- try:
- assert isinstance(coreInst, core.Core)
- except AssertionError:
- myCore = core.Core()
- else:
- myCore = coreInst
-
json_metadata = json.dumps(metadata).encode()
try:
@@ -212,21 +203,18 @@ class POW:
self.difficulty = forceDifficulty
else:
# Calculate difficulty. Dumb for now, may use good algorithm in the future.
- self.difficulty = getDifficultyForNewBlock(bytes(json_metadata + b'\n' + self.data), coreInst=myCore)
+ self.difficulty = getDifficultyForNewBlock(bytes(json_metadata + b'\n' + self.data))
-
- logger.info('Computing POW (difficulty: %s)...' % self.difficulty)
+ logger.info('Computing POW (difficulty: %s)...' % (self.difficulty,))
self.mainHash = '0' * 64
self.puzzle = self.mainHash[0:min(self.difficulty, len(self.mainHash))]
for i in range(max(1, threadCount)):
- t = threading.Thread(name = 'thread%s' % i, target = self.pow, args = (True,myCore))
+            t = threading.Thread(name = 'thread%s' % i, target = self.pow, args = (True,))
t.start()
- self.myCore = myCore
- return
- def pow(self, reporting = False, myCore = None):
+ def pow(self, reporting = False):
startTime = math.floor(time.time())
self.hashing = True
self.reporting = reporting
@@ -239,7 +227,7 @@ class POW:
#token = nacl.hash.blake2b(rand + self.data).decode()
self.metadata['pow'] = nonce
payload = json.dumps(self.metadata).encode() + b'\n' + self.data
- token = myCore._crypto.sha3Hash(payload)
+ token = crypto.sha3Hash(payload)
try:
# on some versions, token is bytes
token = token.decode()
diff --git a/onionr/onionrservices/__init__.py b/onionr/onionrservices/__init__.py
index 1f56dabe..0d33b973 100755
--- a/onionr/onionrservices/__init__.py
+++ b/onionr/onionrservices/__init__.py
@@ -19,17 +19,14 @@
'''
import time
import stem
-import core
from . import connectionserver, bootstrapservice
from onionrutils import stringvalidators, basicrequests
-
+import config
class OnionrServices:
'''
Create a client or server for connecting to peer interfaces
'''
- def __init__(self, onionr_core):
- assert isinstance(onionr_core, core.Core)
- self._core = onionr_core
+ def __init__(self):
self.servers = {}
self.clients = {}
self.shutdown = False
@@ -45,11 +42,11 @@ class OnionrServices:
TRY_WAIT = 3 # Seconds to wait before trying bootstrap again
# HTTP is fine because .onion/i2p is encrypted/authenticated
base_url = 'http://%s/' % (address,)
- socks = self._core.config.get('tor.socksport')
+ socks = config.get('tor.socksport')
for x in range(BOOTSTRAP_TRIES):
- if basicrequests.do_get_request(self._core, base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
+ if basicrequests.do_get_request(base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
# if bootstrap sever is online, tell them our service address
- connectionserver.ConnectionServer(peer, address, core_inst=self._core)
+ connectionserver.ConnectionServer(peer, address)
else:
time.sleep(TRY_WAIT)
else:
diff --git a/onionr/onionrservices/bootstrapservice.py b/onionr/onionrservices/bootstrapservice.py
index 2c23983b..9d77ec9d 100755
--- a/onionr/onionrservices/bootstrapservice.py
+++ b/onionr/onionrservices/bootstrapservice.py
@@ -21,17 +21,14 @@ import time, threading, uuid
from gevent.pywsgi import WSGIServer, WSGIHandler
from stem.control import Controller
from flask import Flask, Response
-import core
from netcontroller import get_open_port
from . import httpheaders
from onionrutils import stringvalidators, epoch
-def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
+def bootstrap_client_service(peer, onionr_inst=None, bootstrap_timeout=300):
'''
Bootstrap client services
'''
- if core_inst is None:
- core_inst = core.Core()
if not stringvalidators.validate_pub_key(peer):
raise ValueError('Peer must be valid base32 ed25519 public key')
@@ -40,11 +37,11 @@ def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
bootstrap_app = Flask(__name__)
http_server = WSGIServer(('127.0.0.1', bootstrap_port), bootstrap_app, log=None)
try:
- assert core_inst.onionrInst.communicatorInst is not None
+ assert onionr_inst.communicatorInst is not None
except (AttributeError, AssertionError) as e:
pass
else:
- core_inst.onionrInst.communicatorInst.service_greenlets.append(http_server)
+ onionr_inst.communicatorInst.service_greenlets.append(http_server)
bootstrap_address = ''
shutdown = False
@@ -71,9 +68,9 @@ def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
else:
return Response("")
- with Controller.from_port(port=core_inst.config.get('tor.controlPort')) as controller:
+ with Controller.from_port(port=onionr_inst.config.get('tor.controlPort')) as controller:
# Connect to the Tor process for Onionr
- controller.authenticate(core_inst.config.get('tor.controlpassword'))
+ controller.authenticate(onionr_inst.config.get('tor.controlpassword'))
# Create the v3 onion service
response = controller.create_ephemeral_hidden_service({80: bootstrap_port}, key_type = 'NEW', key_content = 'ED25519-V3', await_publication = True)
-            core_inst.insertBlock(response.service_id, header='con', sign=True, encryptType='asym',
+            onionr_inst.insertBlock(response.service_id, header='con', sign=True, encryptType='asym',
@@ -86,4 +83,4 @@ def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
# This line reached when server is shutdown by being bootstrapped
# Now that the bootstrap server has received a server, return the address
- return core_inst.keyStore.get(bs_id)
+ return onionr_inst.keyStore.get(bs_id)
diff --git a/onionr/onionrservices/connectionserver.py b/onionr/onionrservices/connectionserver.py
index 047fdcf1..237f105f 100755
--- a/onionr/onionrservices/connectionserver.py
+++ b/onionr/onionrservices/connectionserver.py
@@ -20,7 +20,7 @@
from gevent.pywsgi import WSGIServer
from stem.control import Controller
from flask import Flask
-import core, logger, httpapi
+import logger, httpapi
import onionrexceptions
from netcontroller import get_open_port
from httpapi import apiutils
@@ -28,21 +28,17 @@ from onionrutils import stringvalidators, basicrequests, bytesconverter
from . import httpheaders
class ConnectionServer:
- def __init__(self, peer, address, core_inst=None):
- if core_inst is None:
- self.core_inst = core.Core()
- else:
- self.core_inst = core_inst
+ def __init__(self, peer, address, onionr_inst=None):
if not stringvalidators.validate_pub_key(peer):
raise ValueError('Peer must be valid base32 ed25519 public key')
- socks = core_inst.config.get('tor.socksport') # Load config for Tor socks port for proxy
+ socks = onionr_inst.config.get('tor.socksport') # Load config for Tor socks port for proxy
service_app = Flask(__name__) # Setup Flask app for server.
service_port = get_open_port()
- service_ip = apiutils.setbindip.set_bind_IP(core_inst=self.core_inst)
+ service_ip = apiutils.setbindip.set_bind_IP()
http_server = WSGIServer(('127.0.0.1', service_port), service_app, log=None)
- core_inst.onionrInst.communicatorInst.service_greenlets.append(http_server)
+ onionr_inst.communicatorInst.service_greenlets.append(http_server)
# TODO define basic endpoints useful for direct connections like stats
@@ -54,7 +50,7 @@ class ConnectionServer:
@service_app.route('/close')
def shutdown_server():
- core_inst.onionrInst.communicatorInst.service_greenlets.remove(http_server)
+ onionr_inst.communicatorInst.service_greenlets.remove(http_server)
http_server.stop()
return Response('goodbye')
@@ -64,15 +60,15 @@ class ConnectionServer:
resp = httpheaders.set_default_onionr_http_headers(resp)
return resp
- with Controller.from_port(port=core_inst.config.get('tor.controlPort')) as controller:
+ with Controller.from_port(port=onionr_inst.config.get('tor.controlPort')) as controller:
# Connect to the Tor process for Onionr
- controller.authenticate(core_inst.config.get('tor.controlpassword'))
+ controller.authenticate(onionr_inst.config.get('tor.controlpassword'))
# Create the v3 onion service for the peer to connect to
response = controller.create_ephemeral_hidden_service({80: service_port}, await_publication = True, key_type='NEW', key_content = 'ED25519-V3')
try:
for x in range(3):
- attempt = basicrequests.do_post_request(self.core_inst, 'http://' + address + '/bs/' + response.service_id, port=socks)
+ attempt = basicrequests.do_post_request('http://' + address + '/bs/' + response.service_id, port=socks)
if attempt == 'success':
break
else:
@@ -82,8 +78,8 @@ class ConnectionServer:
raise ConnectionError('Could not reach %s bootstrap address %s' % (peer, address))
else:
# If no connection error, create the service and save it to local global key store
- self.core_inst.keyStore.put('dc-' + response.service_id, bytesconverter.bytes_to_str(peer))
+ onionr_inst.keyStore.put('dc-' + response.service_id, bytesconverter.bytes_to_str(peer))
logger.info('hosting on %s with %s' % (response.service_id, peer))
http_server.serve_forever()
http_server.stop()
- self.core_inst.keyStore.delete('dc-' + response.service_id)
\ No newline at end of file
+ self.onionr_inst.keyStore.delete('dc-' + response.service_id)
+ onionr_inst.keyStore.delete('dc-' + response.service_id)
\ No newline at end of file
diff --git a/onionr/onionrstorage/__init__.py b/onionr/onionrstorage/__init__.py
index 4bfc8bf6..c642baad 100755
--- a/onionr/onionrstorage/__init__.py
+++ b/onionr/onionrstorage/__init__.py
@@ -17,31 +17,31 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
-import core, sys, sqlite3, os, dbcreator, onionrexceptions
+import sys, sqlite3, os
from onionrutils import bytesconverter, stringvalidators
-
+from coredb import dbfiles
+import filepaths, onionrcrypto, dbcreator, onionrexceptions
+from onionrcrypto import hashers
DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
-def dbCreate(coreInst):
+def dbCreate():
try:
- dbcreator.DBCreator(coreInst).createBlockDataDB()
+ dbcreator.DBCreator().createBlockDataDB()
except FileExistsError:
pass
-def _dbInsert(coreInst, blockHash, data):
- assert isinstance(coreInst, core.Core)
- dbCreate(coreInst)
- conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
+def _dbInsert(blockHash, data):
+ dbCreate()
+ conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
c = conn.cursor()
data = (blockHash, data)
c.execute('INSERT INTO blockData (hash, data) VALUES(?, ?);', data)
conn.commit()
conn.close()
-def _dbFetch(coreInst, blockHash):
- assert isinstance(coreInst, core.Core)
- dbCreate(coreInst)
- conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
+def _dbFetch(blockHash):
+ dbCreate()
+ conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
c = conn.cursor()
for i in c.execute('SELECT data from blockData where hash = ?', (blockHash,)):
return i[0]
@@ -49,14 +49,13 @@ def _dbFetch(coreInst, blockHash):
conn.close()
return None
-def deleteBlock(coreInst, blockHash):
+def deleteBlock(blockHash):
# You should call core.removeBlock if you automatically want to remove storage byte count
- assert isinstance(coreInst, core.Core)
- if os.path.exists('%s/%s.dat' % (coreInst.blockDataLocation, blockHash)):
- os.remove('%s/%s.dat' % (coreInst.blockDataLocation, blockHash))
+ if os.path.exists('%s/%s.dat' % (filepaths.block_data_location, blockHash)):
+ os.remove('%s/%s.dat' % (filepaths.block_data_location, blockHash))
return True
- dbCreate(coreInst)
- conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
+ dbCreate()
+ conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
c = conn.cursor()
data = (blockHash,)
c.execute('DELETE FROM blockData where hash = ?', data)
@@ -64,23 +63,21 @@ def deleteBlock(coreInst, blockHash):
conn.close()
return True
-def store(coreInst, data, blockHash=''):
- assert isinstance(coreInst, core.Core)
+def store(data, blockHash=''):
assert stringvalidators.validate_hash(blockHash)
- ourHash = coreInst._crypto.sha3Hash(data)
+ ourHash = hashers.sha3_hash(data)
if blockHash != '':
assert ourHash == blockHash
else:
blockHash = ourHash
if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
- _dbInsert(coreInst, blockHash, data)
+ _dbInsert(blockHash, data)
else:
- with open('%s/%s.dat' % (coreInst.blockDataLocation, blockHash), 'wb') as blockFile:
+ with open('%s/%s.dat' % (filepaths.block_data_location, blockHash), 'wb') as blockFile:
blockFile.write(data)
-def getData(coreInst, bHash):
- assert isinstance(coreInst, core.Core)
+def getData(bHash):
assert stringvalidators.validate_hash(bHash)
bHash = bytesconverter.bytes_to_str(bHash)
@@ -89,7 +86,7 @@ def getData(coreInst, bHash):
# if no entry, check disk
# If no entry in either, raise an exception
retData = None
- fileLocation = '%s/%s.dat' % (coreInst.blockDataLocation, bHash)
+ fileLocation = '%s/%s.dat' % (filepaths.block_data_location, bHash)
if os.path.exists(fileLocation):
with open(fileLocation, 'rb') as block:
retData = block.read()
diff --git a/onionr/onionrstorage/removeblock.py b/onionr/onionrstorage/removeblock.py
index 76112199..9aadea51 100644
--- a/onionr/onionrstorage/removeblock.py
+++ b/onionr/onionrstorage/removeblock.py
@@ -1,7 +1,9 @@
import sys, sqlite3
import onionrexceptions, onionrstorage
from onionrutils import stringvalidators
-def remove_block(core_inst, block):
+from coredb import dbfiles
+import storagecounter
+def remove_block(block):
'''
remove a block from this node (does not automatically blacklist)
@@ -9,13 +11,13 @@ def remove_block(core_inst, block):
'''
if stringvalidators.validate_hash(block):
- conn = sqlite3.connect(core_inst.blockDB, timeout=30)
+ conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
c = conn.cursor()
t = (block,)
c.execute('Delete from hashes where hash=?;', t)
conn.commit()
conn.close()
- dataSize = sys.getsizeof(onionrstorage.getData(core_inst, block))
- core_inst.storage_counter.removeBytes(dataSize)
+ dataSize = sys.getsizeof(onionrstorage.getData(block))
+ storagecounter.StorageCounter().removeBytes(dataSize)
else:
raise onionrexceptions.InvalidHexHash
\ No newline at end of file
diff --git a/onionr/onionrstorage/setdata.py b/onionr/onionrstorage/setdata.py
index 05cd24d1..096612f8 100644
--- a/onionr/onionrstorage/setdata.py
+++ b/onionr/onionrstorage/setdata.py
@@ -1,32 +1,35 @@
import sys, sqlite3
-import onionrstorage, onionrexceptions
-def set_data(core_inst, data):
+import onionrstorage, onionrexceptions, onionrcrypto
+import filepaths, storagecounter
+from coredb import dbfiles
+def set_data(data):
'''
Set the data assciated with a hash
'''
-
+ crypto = onionrcrypto.OnionrCrypto()
+ storage_counter = storagecounter.StorageCounter()
data = data
dataSize = sys.getsizeof(data)
if not type(data) is bytes:
data = data.encode()
- dataHash = core_inst._crypto.sha3Hash(data)
+ dataHash = crypto.sha3Hash(data)
if type(dataHash) is bytes:
dataHash = dataHash.decode()
- blockFileName = core_inst.blockDataLocation + dataHash + '.dat'
+ blockFileName = filepaths.block_data_location + dataHash + '.dat'
try:
- onionrstorage.getData(core_inst, dataHash)
+ onionrstorage.getData(dataHash)
except onionrexceptions.NoDataAvailable:
- if core_inst.storage_counter.addBytes(dataSize) != False:
- onionrstorage.store(core_inst, data, blockHash=dataHash)
- conn = sqlite3.connect(core_inst.blockDB, timeout=30)
+ if storage_counter.addBytes(dataSize) != False:
+ onionrstorage.store(data, blockHash=dataHash)
+ conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
c = conn.cursor()
c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = ?;", (dataHash,))
conn.commit()
conn.close()
- with open(core_inst.dataNonceFile, 'a') as nonceFile:
+ with open(filepaths.data_nonce_file, 'a') as nonceFile:
nonceFile.write(dataHash + '\n')
else:
raise onionrexceptions.DiskAllocationReached
diff --git a/onionr/onionrusers/contactmanager.py b/onionr/onionrusers/contactmanager.py
index 680de9b0..89774b5c 100755
--- a/onionr/onionrusers/contactmanager.py
+++ b/onionr/onionrusers/contactmanager.py
@@ -21,13 +21,14 @@ import os, json, onionrexceptions
import unpaddedbase32
from onionrusers import onionrusers
from onionrutils import bytesconverter, epoch
-
+from utils import identifyhome
class ContactManager(onionrusers.OnionrUser):
- def __init__(self, coreInst, publicKey, saveUser=False, recordExpireSeconds=5):
+ def __init__(self, publicKey, saveUser=False, recordExpireSeconds=5):
publicKey = unpaddedbase32.repad(bytesconverter.str_to_bytes(publicKey)).decode()
- super(ContactManager, self).__init__(coreInst, publicKey, saveUser=saveUser)
- self.dataDir = coreInst.dataDir + '/contacts/'
- self.dataFile = '%s/contacts/%s.json' % (coreInst.dataDir, publicKey)
+ super(ContactManager, self).__init__(publicKey, saveUser=saveUser)
+ home = identifyhome.identify_home()
+ self.dataDir = home + '/contacts/'
+ self.dataFile = '%s/contacts/%s.json' % (home, publicKey)
self.lastRead = 0
self.recordExpire = recordExpireSeconds
self.data = self._loadData()
diff --git a/onionr/onionrusers/onionrusers.py b/onionr/onionrusers/onionrusers.py
index 9ba61aa1..1df6e75d 100755
--- a/onionr/onionrusers/onionrusers.py
+++ b/onionr/onionrusers/onionrusers.py
@@ -21,11 +21,11 @@ import logger, onionrexceptions, json, sqlite3, time
from onionrutils import stringvalidators, bytesconverter, epoch
import unpaddedbase32
import nacl.exceptions
-from coredb import keydb
+from coredb import keydb, dbfiles
-def deleteExpiredKeys(coreInst):
+def deleteExpiredKeys():
# Fetch the keys we generated for the peer, that are still around
- conn = sqlite3.connect(coreInst.forwardKeysFile, timeout=10)
+ conn = sqlite3.connect(dbfiles.forward_keys_db, timeout=10)
c = conn.cursor()
curTime = epoch.get_epoch()
@@ -35,8 +35,8 @@ def deleteExpiredKeys(coreInst):
conn.close()
return
-def deleteTheirExpiredKeys(coreInst, pubkey):
- conn = sqlite3.connect(coreInst.peerDB, timeout=10)
+def deleteTheirExpiredKeys(pubkey):
+ conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
# Prepare the insert
@@ -51,40 +51,41 @@ DEFAULT_KEY_EXPIRE = 604800
#DEFAULT_KEY_EXPIRE = 600
class OnionrUser:
- def __init__(self, coreInst, publicKey, saveUser=False):
+
+ def __init__(self, publicKey, saveUser=False, crypto_inst=None):
'''
OnionrUser is an abstraction for "users" of the network.
- Takes an instance of onionr core, a base32 encoded ed25519 public key, and a bool saveUser
+ Takes a base32 encoded ed25519 public key, and a bool saveUser
saveUser determines if we should add a user to our peer database or not.
'''
+ self.crypto = crypto_inst
publicKey = unpaddedbase32.repad(bytesconverter.str_to_bytes(publicKey)).decode()
self.trust = 0
- self._core = coreInst
self.publicKey = publicKey
if saveUser:
try:
- self._core.addPeer(publicKey)
+ keydb.addkeys.add_peer(publicKey)
except AssertionError:
pass
- self.trust = self._core.getPeerInfo(self.publicKey, 'trust')
+ self.trust = keydb.userinfo.get_user_info(self.publicKey, 'trust')
return
def setTrust(self, newTrust):
'''Set the peers trust. 0 = not trusted, 1 = friend, 2 = ultimate'''
- self._core.setPeerInfo(self.publicKey, 'trust', newTrust)
+ keydb.userinfo.set_user_info(self.publicKey, 'trust', newTrust)
def isFriend(self):
- if self._core.getPeerInfo(self.publicKey, 'trust') == 1:
+ if keydb.userinfo.get_user_info(self.publicKey, 'trust') == 1:
return True
return False
def getName(self):
retData = 'anonymous'
- name = self._core.getPeerInfo(self.publicKey, 'name')
+ name = keydb.userinfo.get_user_info(self.publicKey, 'name')
try:
if len(name) > 0:
retData = name
@@ -93,20 +94,20 @@ class OnionrUser:
return retData
def encrypt(self, data):
- encrypted = self._core._crypto.pubKeyEncrypt(data, self.publicKey, encodedData=True)
+ encrypted = self.crypto.pubKeyEncrypt(data, self.publicKey, encodedData=True)
return encrypted
def decrypt(self, data):
- decrypted = self._core._crypto.pubKeyDecrypt(data, self.publicKey, encodedData=True)
+ decrypted = self.crypto.pubKeyDecrypt(data, self.publicKey, encodedData=True)
return decrypted
def forwardEncrypt(self, data):
- deleteTheirExpiredKeys(self._core, self.publicKey)
- deleteExpiredKeys(self._core)
+ deleteTheirExpiredKeys(self.publicKey)
+ deleteExpiredKeys()
retData = ''
forwardKey = self._getLatestForwardKey()
if stringvalidators.validate_pub_key(forwardKey[0]):
- retData = self._core._crypto.pubKeyEncrypt(data, forwardKey[0], encodedData=True)
+ retData = self.crypto.pubKeyEncrypt(data, forwardKey[0], encodedData=True)
else:
raise onionrexceptions.InvalidPubkey("No valid forward secrecy key available for this user")
#self.generateForwardKey()
@@ -116,7 +117,7 @@ class OnionrUser:
retData = ""
for key in self.getGeneratedForwardKeys(False):
try:
- retData = self._core._crypto.pubKeyDecrypt(encrypted, privkey=key[1], encodedData=True)
+ retData = self.crypto.pubKeyDecrypt(encrypted, privkey=key[1], encodedData=True)
except nacl.exceptions.CryptoError:
retData = False
else:
@@ -128,7 +129,7 @@ class OnionrUser:
def _getLatestForwardKey(self):
# Get the latest forward secrecy key for a peer
key = ""
- conn = sqlite3.connect(self._core.peerDB, timeout=10)
+ conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
# TODO: account for keys created at the same time (same epoch)
@@ -142,7 +143,7 @@ class OnionrUser:
return key
def _getForwardKeys(self):
- conn = sqlite3.connect(self._core.peerDB, timeout=10)
+ conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
keyList = []
@@ -157,11 +158,11 @@ class OnionrUser:
def generateForwardKey(self, expire=DEFAULT_KEY_EXPIRE):
# Generate a forward secrecy key for the peer
- conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
+ conn = sqlite3.connect(dbfiles.forward_keys_db, timeout=10)
c = conn.cursor()
# Prepare the insert
time = epoch.get_epoch()
- newKeys = self._core._crypto.generatePubKey()
+ newKeys = self.crypto.generatePubKey()
newPub = bytesconverter.bytes_to_str(newKeys[0])
newPriv = bytesconverter.bytes_to_str(newKeys[1])
@@ -175,7 +176,7 @@ class OnionrUser:
def getGeneratedForwardKeys(self, genNew=True):
# Fetch the keys we generated for the peer, that are still around
- conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
+ conn = sqlite3.connect(dbfiles.forward_keys_db, timeout=10)
c = conn.cursor()
pubkey = self.publicKey
pubkey = bytesconverter.bytes_to_str(pubkey)
@@ -197,7 +198,7 @@ class OnionrUser:
# Do not add if something went wrong with the key
raise onionrexceptions.InvalidPubkey(newKey)
- conn = sqlite3.connect(self._core.peerDB, timeout=10)
+ conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
# Get the time we're inserting the key at
@@ -222,8 +223,8 @@ class OnionrUser:
return True
@classmethod
- def list_friends(cls, coreInst):
+ def list_friends(cls):
friendList = []
- for x in coreInst.listPeers(trust=1):
- friendList.append(cls(coreInst, x))
+ for x in keydb.listkeys.list_peers(trust=1):
+ friendList.append(cls(x))
return list(friendList)
\ No newline at end of file
diff --git a/onionr/onionrutils/basicrequests.py b/onionr/onionrutils/basicrequests.py
index 3a6ec5c4..4d3fb862 100644
--- a/onionr/onionrutils/basicrequests.py
+++ b/onionr/onionrutils/basicrequests.py
@@ -19,13 +19,13 @@
'''
import requests, streamedrequests
import logger, onionrexceptions
-def do_post_request(core_inst, url, data={}, port=0, proxyType='tor', max_size=10000):
+def do_post_request(url, data={}, port=0, proxyType='tor', max_size=10000, onionr_inst=None):
'''
Do a POST request through a local tor or i2p instance
'''
if proxyType == 'tor':
if port == 0:
- port = core_inst.torPort
+ port = onionr_inst.torPort
proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)}
elif proxyType == 'i2p':
proxies = {'http': 'http://127.0.0.1:4444'}
@@ -44,11 +44,11 @@ def do_post_request(core_inst, url, data={}, port=0, proxyType='tor', max_size=1
retData = False
return retData
-def do_get_request(core_inst, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False, max_size=5242880):
+def do_get_request(url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False, max_size=5242880, onionr_inst=None):
'''
Do a get request through a local tor or i2p instance
'''
- API_VERSION = core_inst.onionrInst.API_VERSION
+ API_VERSION = '' if onionr_inst is None else onionr_inst.onionrInst.API_VERSION
retData = False
if proxyType == 'tor':
if port == 0:
diff --git a/onionr/onionrutils/blockmetadata.py b/onionr/onionrutils/blockmetadata.py
index 97323d4d..08f97745 100644
--- a/onionr/onionrutils/blockmetadata.py
+++ b/onionr/onionrutils/blockmetadata.py
@@ -53,12 +53,12 @@ def get_block_metadata_from_data(blockData):
meta = metadata['meta']
return (metadata, meta, data)
-def process_block_metadata(core_inst, blockHash):
+def process_block_metadata(blockHash):
'''
Read metadata from a block and cache it to the block database
'''
curTime = epoch.get_rounded_epoch(roundS=60)
- myBlock = onionrblockapi.Block(blockHash, core_inst)
+ myBlock = onionrblockapi.Block(blockHash)
if myBlock.isEncrypted:
myBlock.decrypt()
if (myBlock.isEncrypted and myBlock.decrypted) or (not myBlock.isEncrypted):
@@ -67,7 +67,7 @@ def process_block_metadata(core_inst, blockHash):
signer = bytesconverter.bytes_to_str(myBlock.signer)
valid = myBlock.verifySig()
if myBlock.getMetadata('newFSKey') is not None:
- onionrusers.OnionrUser(core_inst, signer).addForwardKey(myBlock.getMetadata('newFSKey'))
+ onionrusers.OnionrUser(signer).addForwardKey(myBlock.getMetadata('newFSKey'))
try:
if len(blockType) <= 10:
@@ -85,7 +85,7 @@ def process_block_metadata(core_inst, blockHash):
blockmetadb.update_block_info(blockHash, 'expire', expireTime)
if not blockType is None:
blockmetadb.update_block_info(blockHash, 'dataType', blockType)
- onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = core_inst.onionrInst)
+ #onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = core_inst.onionrInst)
else:
pass
diff --git a/onionr/onionrutils/checkcommunicator.py b/onionr/onionrutils/checkcommunicator.py
index cfc2c31a..2e300f57 100644
--- a/onionr/onionrutils/checkcommunicator.py
+++ b/onionr/onionrutils/checkcommunicator.py
@@ -18,9 +18,10 @@
along with this program. If not, see .
'''
import time, os
-def is_communicator_running(core_inst, timeout = 5, interval = 0.1):
+import filepaths
+def is_communicator_running(timeout = 5, interval = 0.1):
try:
- runcheck_file = core_inst.dataDir + '.runcheck'
+ runcheck_file = filepaths.run_check_file
if not os.path.isfile(runcheck_file):
open(runcheck_file, 'w+').close()
diff --git a/onionr/onionrutils/importnewblocks.py b/onionr/onionrutils/importnewblocks.py
index 5420d0a6..1c9f55b5 100644
--- a/onionr/onionrutils/importnewblocks.py
+++ b/onionr/onionrutils/importnewblocks.py
@@ -18,19 +18,19 @@
along with this program. If not, see .
'''
import glob
-import logger, core
+import logger
from onionrutils import blockmetadata
from coredb import blockmetadb
-def import_new_blocks(core_inst=None, scanDir=''):
+import filepaths, onionrcrypto
+def import_new_blocks(scanDir=''):
'''
This function is intended to scan for new blocks ON THE DISK and import them
'''
- if core_inst is None:
- core_inst = core.Core()
+ crypto = onionrcrypto.OnionrCrypto()
blockList = blockmetadb.get_block_list()
exist = False
if scanDir == '':
- scanDir = core_inst.blockDataLocation
+ scanDir = filepaths.block_data_location
if not scanDir.endswith('/'):
scanDir += '/'
for block in glob.glob(scanDir + "*.dat"):
@@ -39,10 +39,10 @@ def import_new_blocks(core_inst=None, scanDir=''):
logger.info('Found new block on dist %s' % block, terminal=True)
with open(block, 'rb') as newBlock:
block = block.replace(scanDir, '').replace('.dat', '')
- if core_inst._crypto.sha3Hash(newBlock.read()) == block.replace('.dat', ''):
+ if crypto.sha3Hash(newBlock.read()) == block.replace('.dat', ''):
blockmetadb.add_to_block_DB(block.replace('.dat', ''), dataSaved=True)
logger.info('Imported block %s.' % block, terminal=True)
- blockmetadata.process_block_metadata(core_inst, block)
+ blockmetadata.process_block_metadata(block)
else:
logger.warn('Failed to verify hash for %s' % block, terminal=True)
if not exist:
diff --git a/onionr/onionrutils/mnemonickeys.py b/onionr/onionrutils/mnemonickeys.py
index 56085536..8310f072 100644
--- a/onionr/onionrutils/mnemonickeys.py
+++ b/onionr/onionrutils/mnemonickeys.py
@@ -19,9 +19,10 @@
'''
import base64
from etc import pgpwords
-def get_human_readable_ID(core_inst, pub=''):
+import onionrcrypto
+def get_human_readable_ID(pub=''):
'''gets a human readable ID from a public key'''
if pub == '':
- pub = core_inst._crypto.pubKey
+ pub = onionrcrypto.OnionrCrypto().pubKey
pub = base64.b16encode(base64.b32decode(pub)).decode()
return ' '.join(pgpwords.wordify(pub))
diff --git a/onionr/onionrutils/validatemetadata.py b/onionr/onionrutils/validatemetadata.py
index 2800c24a..ef3cde95 100644
--- a/onionr/onionrutils/validatemetadata.py
+++ b/onionr/onionrutils/validatemetadata.py
@@ -21,9 +21,12 @@ import json
import logger, onionrexceptions
from etc import onionrvalues
from onionrutils import stringvalidators, epoch, bytesconverter
-def validate_metadata(core_inst, metadata, blockData):
+import config, filepaths, onionrcrypto
+def validate_metadata(metadata, blockData):
'''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
# TODO, make this check sane sizes
+ crypto = onionrcrypto.OnionrCrypto()
+ requirements = onionrvalues.OnionrValues()
retData = False
maxClockDifference = 120
@@ -35,11 +38,11 @@ def validate_metadata(core_inst, metadata, blockData):
pass
# Validate metadata dict for invalid keys to sizes that are too large
- maxAge = core_inst.config.get("general.max_block_age", onionrvalues.OnionrValues().default_expire)
+ maxAge = config.get("general.max_block_age", onionrvalues.OnionrValues().default_expire)
if type(metadata) is dict:
for i in metadata:
try:
- core_inst.requirements.blockMetadataLengths[i]
+ requirements.blockMetadataLengths[i]
except KeyError:
logger.warn('Block has invalid metadata key ' + i)
break
@@ -49,7 +52,7 @@ def validate_metadata(core_inst, metadata, blockData):
testData = len(testData)
except (TypeError, AttributeError) as e:
testData = len(str(testData))
- if core_inst.requirements.blockMetadataLengths[i] < testData:
+ if requirements.blockMetadataLengths[i] < testData:
logger.warn('Block metadata key ' + i + ' exceeded maximum size')
break
if i == 'time':
@@ -78,9 +81,9 @@ def validate_metadata(core_inst, metadata, blockData):
else:
# if metadata loop gets no errors, it does not break, therefore metadata is valid
# make sure we do not have another block with the same data content (prevent data duplication and replay attacks)
- nonce = bytesconverter.bytes_to_str(core_inst._crypto.sha3Hash(blockData))
+ nonce = bytesconverter.bytes_to_str(crypto.sha3Hash(blockData))
try:
- with open(core_inst.dataNonceFile, 'r') as nonceFile:
+ with open(filepaths.data_nonce_file, 'r') as nonceFile:
if nonce in nonceFile.read():
retData = False # we've seen that nonce before, so we can't pass metadata
raise onionrexceptions.DataExists
diff --git a/onionr/serializeddata.py b/onionr/serializeddata.py
index 8121c8d5..c721a30c 100755
--- a/onionr/serializeddata.py
+++ b/onionr/serializeddata.py
@@ -18,10 +18,10 @@
along with this program. If not, see .
'''
-import core, json
+import json
from coredb import blockmetadb
class SerializedData:
- def __init__(self, coreInst):
+ def __init__(self, o_inst):
'''
Serialized data is in JSON format:
{
@@ -30,14 +30,13 @@ class SerializedData:
etc
}
'''
- assert isinstance(coreInst, core.Core)
- self._core = coreInst
+ self.o_inst = o_inst
def getStats(self):
'''Return statistics about our node'''
stats = {}
- stats['uptime'] = self._core.onionrInst.communicatorInst.getUptime()
- stats['connectedNodes'] = '\n'.join(self._core.onionrInst.communicatorInst.onlinePeers)
+ stats['uptime'] = self.o_inst.communicatorInst.getUptime()
+ stats['connectedNodes'] = '\n'.join(self.o_inst.communicatorInst.onlinePeers)
stats['blockCount'] = len(blockmetadb.get_block_list())
- stats['blockQueueCount'] = len(self._core.onionrInst.communicatorInst.blockQueue)
+ stats['blockQueueCount'] = len(self.o_inst.communicatorInst.blockQueue)
return json.dumps(stats)
diff --git a/onionr/static-data/default-plugins/flow/main.py b/onionr/static-data/default-plugins/flow/main.py
index 16a7dc08..d98f6585 100755
--- a/onionr/static-data/default-plugins/flow/main.py
+++ b/onionr/static-data/default-plugins/flow/main.py
@@ -21,7 +21,7 @@
# Imports some useful libraries
import threading, time, locale, sys, os
from onionrblockapi import Block
-import logger, config
+import logger, config, onionrblocks
from onionrutils import escapeansi, epoch
locale.setlocale(locale.LC_ALL, '')
from coredb import blockmetadb
@@ -34,7 +34,6 @@ PLUGIN_VERSION = '0.0.1'
class OnionrFlow:
def __init__(self):
- self.myCore = pluginapi.get_core()
self.alreadyOutputed = []
self.flowRunning = False
self.channel = None
@@ -63,7 +62,7 @@ class OnionrFlow:
expireTime = epoch.get_epoch() + 43200
if len(message) > 0:
logger.info('Inserting message as block...', terminal=True)
- self.myCore.insertBlock(message, header='txt', expire=expireTime, meta={'ch': self.channel})
+ onionrblocks.insert(message, header='txt', expire=expireTime, meta={'ch': self.channel})
logger.info("Flow is exiting, goodbye", terminal=True)
return
diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py
index 9dbb8827..4e77750a 100755
--- a/onionr/storagecounter.py
+++ b/onionr/storagecounter.py
@@ -17,17 +17,16 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
-import config
+import config, filepaths
config.reload()
class StorageCounter:
- def __init__(self, coreInst):
- self._core = coreInst
- self.dataFile = self._core.usageFile
+ def __init__(self):
+ self.dataFile = filepaths.usage_file
return
def isFull(self):
retData = False
- if self._core.config.get('allocations.disk', 2000000000) <= (self.getAmount() + 1000):
+ if config.get('allocations.disk', 2000000000) <= (self.getAmount() + 1000):
retData = True
return retData
@@ -49,13 +48,13 @@ class StorageCounter:
def getPercent(self):
'''Return percent (decimal/float) of disk space we're using'''
amount = self.getAmount()
- return round(amount / self._core.config.get('allocations.disk', 2000000000), 2)
+ return round(amount / config.get('allocations.disk', 2000000000), 2)
def addBytes(self, amount):
'''Record that we are now using more disk space, unless doing so would exceed configured max'''
newAmount = amount + self.getAmount()
retData = newAmount
- if newAmount > self._core.config.get('allocations.disk', 2000000000):
+ if newAmount > config.get('allocations.disk', 2000000000):
retData = False
else:
self._update(newAmount)
diff --git a/onionr/subprocesspow.py b/onionr/subprocesspow.py
index 1ca2f9b1..674aea00 100755
--- a/onionr/subprocesspow.py
+++ b/onionr/subprocesspow.py
@@ -22,22 +22,18 @@
import subprocess, os
import multiprocessing, threading, time, json
from multiprocessing import Pipe, Process
-import core, onionrblockapi, config, onionrutils, logger, onionrproofs
+import onionrblockapi, config, onionrutils, logger, onionrproofs, onionrcrypto
from onionrutils import bytesconverter
-
+crypto = onionrcrypto.OnionrCrypto()
class SubprocessPOW:
- def __init__(self, data, metadata, core_inst=None, subproc_count=None):
+ def __init__(self, data, metadata, subproc_count=None):
'''
Onionr proof of work using multiple processes
- Accepts block data, block metadata
- and optionally an onionr core library instance.
+ Accepts block data, block metadata
if subproc_count is not set, os.cpu_count() is used to determine the number of processes
Do to Python GIL multiprocessing or use of external libraries is necessary to accelerate CPU bound tasks
'''
- # Option to accept existing core instance to save memory
- if core_inst is None:
- core_inst = core.Core()
# No known benefit to using more processes than there are cores.
# Note: os.cpu_count perhaps not always accurate
if subproc_count is None:
@@ -45,7 +41,6 @@ class SubprocessPOW:
self.subproc_count = subproc_count
self.result = ''
self.shutdown = False
- self.core_inst = core_inst
self.data = data
self.metadata = metadata
@@ -54,7 +49,7 @@ class SubprocessPOW:
self.data = bytesconverter.str_to_bytes(data)
# Calculate difficulty. Dumb for now, may use good algorithm in the future.
- self.difficulty = onionrproofs.getDifficultyForNewBlock(bytes(json_metadata + b'\n' + self.data), coreInst=self.core_inst)
+ self.difficulty = onionrproofs.getDifficultyForNewBlock(bytes(json_metadata + b'\n' + self.data))
logger.info('Computing POW (difficulty: %s)...' % self.difficulty)
@@ -101,7 +96,6 @@ class SubprocessPOW:
metadata = self.metadata
puzzle = self.puzzle
difficulty = self.difficulty
- mcore = core.Core() # I think we make a new core here because of multiprocess bugs
while True:
# Break if shutdown received
if pipe.poll() and pipe.recv() == 'shutdown':
@@ -111,7 +105,7 @@ class SubprocessPOW:
# Serialize metadata, combine with block data
payload = json.dumps(metadata).encode() + b'\n' + data
# Check sha3_256 hash of block, compare to puzzle. Send payload if puzzle finished
- token = mcore._crypto.sha3Hash(payload)
+ token = crypto.sha3Hash(payload)
token = bytesconverter.bytes_to_str(token) # ensure token is string
if puzzle == token[0:difficulty]:
pipe.send(payload)
diff --git a/onionr/utils/netutils.py b/onionr/utils/netutils.py
index 1c7bea1c..7bd285f2 100755
--- a/onionr/utils/netutils.py
+++ b/onionr/utils/netutils.py
@@ -18,7 +18,7 @@
along with this program. If not, see .
'''
from onionrutils import basicrequests
-def checkNetwork(core_inst, torPort=0):
+def checkNetwork(torPort=0):
'''Check if we are connected to the internet (through Tor)'''
retData = False
connectURLs = []
@@ -27,7 +27,7 @@ def checkNetwork(core_inst, torPort=0):
connectURLs = connectTest.read().split(',')
for url in connectURLs:
- if basicrequests.do_get_request(core_inst, url, port=torPort, ignoreAPI=True) != False:
+ if basicrequests.do_get_request(url, port=torPort, ignoreAPI=True) != False:
retData = True
break
except FileNotFoundError:
diff --git a/onionr/utils/networkmerger.py b/onionr/utils/networkmerger.py
index a074c62f..bd06cbf6 100755
--- a/onionr/utils/networkmerger.py
+++ b/onionr/utils/networkmerger.py
@@ -18,21 +18,25 @@
along with this program. If not, see .
'''
import logger
-def mergeAdders(newAdderList, coreInst):
+from coredb import keydb
+import config, onionrblacklist
+from utils import gettransports
+def mergeAdders(newAdderList):
'''
Merge peer adders list to our database
'''
+ blacklist = onionrblacklist.OnionrBlackList()
try:
retVal = False
if newAdderList != False:
for adder in newAdderList.split(','):
adder = adder.strip()
- if not adder in coreInst.listAdders(randomOrder = False) and adder != coreInst.hsAddress and not coreInst._blacklist.inBlacklist(adder):
- if not coreInst.config.get('tor.v3onions') and len(adder) == 62:
+ if not adder in keydb.listkeys.list_adders(randomOrder = False) and adder != gettransports.transports[0] and not blacklist.inBlacklist(adder):
+ if not config.get('tor.v3onions') and len(adder) == 62:
continue
- if coreInst.addAddress(adder):
- # Check if we have the maxmium amount of allowed stored peers
- if coreInst.config.get('peers.max_stored_peers') > len(coreInst.listAdders()):
+ if keydb.addkeys.add_address(adder):
+ # Check if we have the maximum amount of allowed stored peers
+ if config.get('peers.max_stored_peers') > len(keydb.listkeys.list_adders()):
logger.info('Added %s to db.' % adder, timestamp = True)
retVal = True
else: