progress in removing core

Kevin Froman 2019-07-18 18:07:18 -05:00
parent 1775b96a04
commit dbbefafd19
22 changed files with 224 additions and 75 deletions

View File

@@ -19,6 +19,7 @@
'''
import secrets
from etc import onionrvalues
+import onionrblocks
def insert_deniable_block(comm_inst):
'''Insert a fake block in order to make it more difficult to track real blocks'''
fakePeer = ''
@@ -27,5 +28,5 @@ def insert_deniable_block(comm_inst):
# This assumes on the libsodium primitives to have key-privacy
fakePeer = onionrvalues.DENIABLE_PEER_ADDRESS
data = secrets.token_hex(secrets.randbelow(1024) + 1)
-comm_inst._core.insertBlock(data, header='pm', encryptType='asym', asymPeer=fakePeer, disableForward=True, meta={'subject': 'foo'})
+onionrblocks.insert(data, header='pm', encryptType='asym', asymPeer=fakePeer, disableForward=True, meta={'subject': 'foo'})
comm_inst.decrementThreadCount('insert_deniable_block')

View File

@@ -21,27 +21,28 @@ import sqlite3
import logger
from onionrusers import onionrusers
from onionrutils import epoch
-from coredb import blockmetadb
+from coredb import blockmetadb, dbfiles
+from onionrstorage import removeblock, setdata
def clean_old_blocks(comm_inst):
'''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
# Delete expired blocks
for bHash in blockmetadb.get_expired_blocks():
-comm_inst._core._blacklist.addToDB(bHash)
-comm_inst._core.removeBlock(bHash)
+comm_inst.blacklist.addToDB(bHash)
+removeblock.remove_block(bHash)
logger.info('Deleted block: %s' % (bHash,))
while comm_inst._core.storage_counter.isFull():
oldest = blockmetadb.get_block_list()[0]
-comm_inst._core._blacklist.addToDB(oldest)
-comm_inst._core.removeBlock(oldest)
+comm_inst.blacklist.addToDB(oldest)
+removeblock.remove_block(oldest)
logger.info('Deleted block: %s' % (oldest,))
comm_inst.decrementThreadCount('clean_old_blocks')
def clean_keys(comm_inst):
'''Delete expired forward secrecy keys'''
-conn = sqlite3.connect(comm_inst._core.peerDB, timeout=10)
+conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
time = epoch.get_epoch()
deleteKeys = []
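
A minimal sketch of the cleanup flow after this refactor, using only the module-level helpers shown above (blockmetadb, removeblock); logging and the storage-counter loop are left out:

from coredb import blockmetadb
from onionrstorage import removeblock

def purge_expired_blocks():
    # Remove every block whose expiry timestamp has passed
    for block_hash in blockmetadb.get_expired_blocks():
        removeblock.remove_block(block_hash)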

View File

@@ -20,6 +20,7 @@
import logger
from onionrutils import stringvalidators
from communicator import peeraction, onlinepeers
+from utils import gettransports
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
tryAmount = 1
@@ -40,7 +41,7 @@ def lookup_new_peer_transports_with_communicator(comm_inst):
invalid = []
for x in newPeers:
x = x.strip()
-if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x == comm_inst._core.hsAddress:
+if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x == gettransports.transports[0]:
# avoid adding if its our address
invalid.append(x)
for x in invalid:
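
With the core object gone, our own transport address now comes from utils.gettransports. A small sketch of the validity check, assuming gettransports.transports is populated the same way it is used in the hunk above:

from onionrutils import stringvalidators
from utils import gettransports

def is_usable_peer_transport(address: str) -> bool:
    # A candidate is usable if it parses as a transport and is not our own address
    return stringvalidators.validate_transport(address) and address != gettransports.transports[0]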

View File

@@ -37,7 +37,7 @@ def lookup_blocks_from_communicator(comm_inst):
if not comm_inst.isOnline:
break
# check if disk allocation is used
-if comm_inst._core.storage_counter.isFull():
+if comm_inst.storage_counter.isFull():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
@@ -72,7 +72,7 @@ def lookup_blocks_from_communicator(comm_inst):
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
if i not in comm_inst.blockQueue:
-if onionrproofs.hashMeetsDifficulty(i) and not comm_inst._core._blacklist.inBlacklist(i):
+if onionrproofs.hashMeetsDifficulty(i) and not comm_inst.blacklist.inBlacklist(i):
if len(comm_inst.blockQueue) <= 1000000:
comm_inst.blockQueue[i] = [peer] # add blocks to download queue
new_block_count += 1

View File

@@ -24,15 +24,14 @@ from onionrutils import localcommand, epoch
def net_check(comm_inst):
'''Check if we are connected to the internet or not when we can't connect to any peers'''
rec = False # for detecting if we have received incoming connections recently
-c = comm_inst._core
if len(comm_inst.onlinePeers) == 0:
try:
-if (epoch.get_epoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
+if (epoch.get_epoch() - int(localcommand.local_command('/lastconnect'))) <= 60:
comm_inst.isOnline = True
rec = True
except ValueError:
pass
-if not rec and not netutils.checkNetwork(c, torPort=comm_inst.proxyPort):
+if not rec and not netutils.checkNetwork(torPort=comm_inst.proxyPort):
if not comm_inst.shutdown:
logger.warn('Network check failed, are you connected to the Internet, and is Tor working?')
comm_inst.isOnline = False
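
localcommand.local_command no longer takes a core instance; the endpoint path is now the first argument. A sketch of the recent-connection check on its own, assuming the '/lastconnect' endpoint behaves as it is used above:

from onionrutils import epoch, localcommand

def connected_recently(window: int = 60) -> bool:
    # True if the local API reported an incoming connection within `window` seconds
    try:
        last_connect = int(localcommand.local_command('/lastconnect'))
    except (TypeError, ValueError):
        return False
    return (epoch.get_epoch() - last_connect) <= window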

View File

@@ -27,7 +27,6 @@ class OnionrCommunicatorTimers:
self.requiresPeer = requiresPeer
self.daemonInstance = daemonInstance
self.maxThreads = maxThreads
-self._core = self.daemonInstance._core
self.args = myArgs
self.daemonInstance.timers.append(self)

View File

@@ -22,14 +22,13 @@ from onionrutils import stringvalidators, bytesconverter
from coredb import blockmetadb
def service_creator(daemon):
assert isinstance(daemon, communicator.OnionrCommunicatorDaemon)
-core = daemon._core
# Find socket connection blocks
# TODO cache blocks and only look at recently received ones
con_blocks = blockmetadb.get_blocks_by_type('con')
for b in con_blocks:
if not b in daemon.active_services:
-bl = onionrblockapi.Block(b, core=core, decrypt=True)
+bl = onionrblockapi.Block(b, decrypt=True)
bs = bytesconverter.bytes_to_str(bl.bcontent) + '.onion'
if stringvalidators.validate_pub_key(bl.signer) and stringvalidators.validate_transport(bs):
signer = bytesconverter.bytes_to_str(bl.signer)

View File

@@ -29,8 +29,7 @@ def upload_blocks_from_communicator(comm_inst):
triedPeers = []
finishedUploads = []
-core = comm_inst._core
-comm_inst.blocksToUpload = core._crypto.randomShuffle(comm_inst.blocksToUpload)
+comm_inst.blocksToUpload = comm_inst.crypto.randomShuffle(comm_inst.blocksToUpload)
if len(comm_inst.blocksToUpload) != 0:
for bl in comm_inst.blocksToUpload:
if not stringvalidators.validate_hash(bl):
@@ -46,8 +45,8 @@ def upload_blocks_from_communicator(comm_inst):
data = {'block': block.Block(bl).getRaw()}
proxyType = proxypicker.pick_proxy(peer)
logger.info("Uploading block to " + peer, terminal=True)
-if not basicrequests.do_post_request(core, url, data=data, proxyType=proxyType) == False:
-localcommand.local_command(core, 'waitforshare/' + bl, post=True)
+if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
+localcommand.local_command('waitforshare/' + bl, post=True)
finishedUploads.append(bl)
for x in finishedUploads:
try:
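
The post-upload call follows the same pattern: the core argument is dropped and only the endpoint string remains. A sketch with a placeholder hash value:

from onionrutils import localcommand

block_hash = '0' * 64  # placeholder; real values come from comm_inst.blocksToUpload
# Ask the local daemon to hold off sharing statistics for this block
localcommand.local_command('waitforshare/' + block_hash, post=True)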

View File

@@ -21,20 +21,22 @@ import sqlite3
import onionrevents as events
from onionrutils import stringvalidators
from . import listkeys
-def add_peer(core_inst, peerID, name=''):
+from utils import gettransports
+from .. import dbfiles
+def add_peer(peerID, name=''):
'''
Adds a public key to the key database (misleading function name)
'''
-if peerID in core_inst.listPeers() or peerID == core_inst._crypto.pubKey:
+if peerID in listkeys.list_peers() or peerID == core_inst._crypto.pubKey:
raise ValueError("specified id is already known")
# This function simply adds a peer to the DB
if not stringvalidators.validate_pub_key(peerID):
return False
-events.event('pubkey_add', data = {'key': peerID}, onionr = core_inst.onionrInst)
+#events.event('pubkey_add', data = {'key': peerID}, onionr = core_inst.onionrInst)
-conn = sqlite3.connect(core_inst.peerDB, timeout=30)
+conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=30)
hashID = core_inst._crypto.pubKeyHashID(peerID)
c = conn.cursor()
t = (peerID, name, 'unknown', hashID, 0)
@@ -54,7 +56,7 @@ def add_peer(core_inst, peerID, name=''):
return True
-def add_address(core_inst, address):
+def add_address(address):
'''
Add an address to the address database (only tor currently)
'''
@@ -62,9 +64,9 @@ def add_address(core_inst, address):
if type(address) is None or len(address) == 0:
return False
if stringvalidators.validate_transport(address):
-if address == core_inst.config.get('i2p.ownAddr', None) or address == core_inst.hsAddress:
+if address == gettransports.transports[0]:
return False
-conn = sqlite3.connect(core_inst.addressDB, timeout=30)
+conn = sqlite3.connect(dbfiles.address_info_db, timeout=30)
c = conn.cursor()
# check if address is in database
# this is safe to do because the address is validated above, but we strip some chars here too just in case
@@ -84,7 +86,7 @@ def add_address(core_inst, address):
conn.commit()
conn.close()
-events.event('address_add', data = {'address': address}, onionr = core_inst.onionrInst)
+#events.event('address_add', data = {'address': address}, onionr = core_inst.onionrInst)
return True
else:
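
Both functions above are now called without a core instance. A usage sketch, assuming this module lives under the coredb keydb package (the diff only shows relative imports) and using an illustrative address value:

from coredb.keydb import addkeys  # assumed package path

# Record a (made-up) Tor transport address; returns False if the string is
# empty, fails validation, or is our own address
added = addkeys.add_address('exampleexampleexampleexampleexampleexampleexampleexample.onion')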

View File

@@ -21,6 +21,7 @@ import sqlite3
import logger
from onionrutils import epoch
from .. import dbfiles
+from . import userinfo
def list_peers(randomOrder=True, getPow=False, trust=0):
'''
Return a list of public keys (misleading function name)
@@ -58,11 +59,11 @@ def list_peers(randomOrder=True, getPow=False, trust=0):
return peerList
-def list_adders(core_inst, randomOrder=True, i2p=True, recent=0):
+def list_adders(randomOrder=True, i2p=True, recent=0):
'''
Return a list of transport addresses
'''
-conn = sqlite3.connect(core_inst.addressDB, timeout=30)
+conn = sqlite3.connect(dbfiles.address_info_db, timeout=30)
c = conn.cursor()
if randomOrder:
addresses = c.execute('SELECT * FROM adders ORDER BY RANDOM();')
@@ -77,7 +78,7 @@ def list_adders(core_inst, randomOrder=True, i2p=True, recent=0):
testList = list(addressList) # create new list to iterate
for address in testList:
try:
-if recent > 0 and (epoch.get_epoch() - core_inst.getAddressInfo(address, 'lastConnect')) > recent:
+if recent > 0 and (epoch.get_epoch() - userinfo.get_user_info(address, 'lastConnect')) > recent:
raise TypeError # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
except TypeError:
addressList.remove(address)
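
list_adders likewise drops its core_inst parameter. A sketch of the new call, with the package path assumed as above:

from coredb.keydb import listkeys  # assumed package path

# Transport addresses connected to within the last day, in random order
recent_addresses = listkeys.list_adders(randomOrder=True, recent=86400)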

View File

@@ -20,21 +20,21 @@
import sqlite3
import onionrevents as events
from onionrutils import stringvalidators
-def remove_address(core_inst, address):
+from .. import dbfiles
+def remove_address(address):
'''
Remove an address from the address database
'''
if stringvalidators.validate_transport(address):
-conn = sqlite3.connect(core_inst.addressDB, timeout=30)
+conn = sqlite3.connect(dbfiles.address_info_db, timeout=30)
c = conn.cursor()
t = (address,)
c.execute('Delete from adders where address=?;', t)
conn.commit()
conn.close()
-events.event('address_remove', data = {'address': address}, onionr = core_inst.onionrInst)
+#events.event('address_remove', data = {'address': address}, onionr = core_inst.onionrInst)
return True
else:
return False

View File

@@ -18,7 +18,8 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3
-def get_address_info(core_inst, address, info):
+from .. import dbfiles
+def get_address_info(address, info):
'''
Get info about an address from its database entry
@@ -34,7 +35,7 @@ def get_address_info(core_inst, address, info):
introduced 9
'''
-conn = sqlite3.connect(core_inst.addressDB, timeout=30)
+conn = sqlite3.connect(dbfiles.address_info_db, timeout=30)
c = conn.cursor()
command = (address,)
@@ -54,12 +55,12 @@ def get_address_info(core_inst, address, info):
return retVal
-def set_address_info(core_inst, address, key, data):
+def set_address_info(address, key, data):
'''
Update an address for a key
'''
-conn = sqlite3.connect(core_inst.addressDB, timeout=30)
+conn = sqlite3.connect(dbfiles.address_info_db, timeout=30)
c = conn.cursor()
command = (data, address)

View File

@@ -18,7 +18,8 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3
-def get_user_info(core_inst, peer, info):
+from .. import dbfiles
+def get_user_info(peer, info):
'''
Get info about a peer from their database entry
@@ -29,7 +30,7 @@ def get_user_info(core_inst, peer, info):
trust int 4
hashID text 5
'''
-conn = sqlite3.connect(core_inst.peerDB, timeout=30)
+conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=30)
c = conn.cursor()
command = (peer,)
@@ -50,12 +51,12 @@ def get_user_info(core_inst, peer, info):
return retVal
-def set_peer_info(core_inst, peer, key, data):
+def set_peer_info(peer, key, data):
'''
Update a peer for a key
'''
-conn = sqlite3.connect(core_inst.peerDB, timeout=30)
+conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=30)
c = conn.cursor()
command = (data, peer)
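
A sketch of reading and updating a single peer column through the new module-level functions; the full package path and the example key are assumptions, not part of this diff:

from coredb.keydb import userinfo  # assumed package path; the listkeys hunk imports it as `from . import userinfo`

peer_key = 'EXAMPLEPUBLICKEY'  # placeholder public key
peer_name = userinfo.get_user_info(peer_key, 'name')
userinfo.set_peer_info(peer_key, 'trust', 1)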

View File

@@ -18,9 +18,9 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import platform
-def use_subprocess(core_inst):
+def use_subprocess(config_inst):
use = True
-if not core_inst.config.get('general.use_subprocess_pow_if_possible', True):
+if not config_inst.get('general.use_subprocess_pow_if_possible', True):
use = False
if 'Windows' == platform.system():
use = False
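
use_subprocess now takes the config object directly instead of reaching through a core instance. A sketch, assuming Onionr's module-level config exposes get() the way it is used elsewhere in this commit:

import config  # assumed import path for Onionr's runtime configuration
from etc import powchoice

# True unless PoW subprocesses are disabled in config or we are on Windows
subprocess_ok = powchoice.use_subprocess(config)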

View File

@@ -12,4 +12,6 @@ forward_keys_file = home + 'forward-keys.db'
tor_hs_address_file = home + 'hs/hostname'
-run_check_file = home + '.runcheck'
+run_check_file = home + '.runcheck'
+data_nonce_file = home + 'block-nonces.dat'

View File

@@ -1,21 +1,13 @@
import json
-import core, onionrblockapi
+import onionrblockapi
from onionrutils import bytesconverter, stringvalidators
class GetBlockData:
-def __init__(self, client_api_inst=None):
-if client_api_inst is None:
-self.client_api_inst = None
-self.c = core.Core()
-elif isinstance(client_api_inst, core.Core):
-self.client_api_inst = None
-self.c = client_api_inst
-else:
-self.client_api_Inst = client_api_inst
-self.c = core.Core()
+def __init__(self, client_api_inst):
+self.client_api_inst = client_api_inst
def get_block_data(self, bHash, decrypt=False, raw=False, headerOnly=False):
assert stringvalidators.validate_hash(bHash)
-bl = onionrblockapi.Block(bHash, core=self.c)
+bl = onionrblockapi.Block(bHash)
if decrypt:
bl.decrypt()
if bl.isEncrypted and not bl.decrypted:

View File

@@ -18,7 +18,7 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from flask import Blueprint, Response
-import core, onionrblockapi, onionrexceptions
+import onionrblockapi, onionrexceptions
from onionrutils import stringvalidators
from coredb import daemonqueue
shutdown_bp = Blueprint('shutdown', __name__)

View File

@@ -17,7 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
-import core, json
+import json
from onionrusers import contactmanager
from flask import Blueprint, Response, request, abort, redirect
@@ -26,31 +26,30 @@ friends = Blueprint('friends', __name__)
@friends.route('/friends/list')
def list_friends():
pubkey_list = {}
-c = core.Core()
-friend_list = contactmanager.ContactManager.list_friends(c)
+friend_list = contactmanager.ContactManager.list_friends()
for friend in friend_list:
pubkey_list[friend.publicKey] = {'name': friend.get_info('name')}
return json.dumps(pubkey_list)
@friends.route('/friends/add/<pubkey>', methods=['POST'])
def add_friend(pubkey):
-contactmanager.ContactManager(core.Core(), pubkey, saveUser=True).setTrust(1)
+contactmanager.ContactManager(pubkey, saveUser=True).setTrust(1)
return redirect(request.referrer + '#' + request.form['token'])
@friends.route('/friends/remove/<pubkey>', methods=['POST'])
def remove_friend(pubkey):
-contactmanager.ContactManager(core.Core(), pubkey).setTrust(0)
+contactmanager.ContactManager(pubkey).setTrust(0)
return redirect(request.referrer + '#' + request.form['token'])
@friends.route('/friends/setinfo/<pubkey>/<key>', methods=['POST'])
def set_info(pubkey, key):
data = request.form['data']
-contactmanager.ContactManager(core.Core(), pubkey).set_info(key, data)
+contactmanager.ContactManager(pubkey).set_info(key, data)
return redirect(request.referrer + '#' + request.form['token'])
@friends.route('/friends/getinfo/<pubkey>/<key>')
def get_info(pubkey, key):
-retData = contactmanager.ContactManager(core.Core(), pubkey).get_info(key)
+retData = contactmanager.ContactManager(pubkey).get_info(key)
if retData is None:
abort(404)
else:
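
Every endpoint above now builds a ContactManager from just the public key. The same trust operation outside of Flask, sketched with a placeholder key:

from onionrusers import contactmanager

pubkey = 'EXAMPLEPUBLICKEY'  # placeholder
# Save the user if new, then mark them as a friend (trust level 1)
contactmanager.ContactManager(pubkey, saveUser=True).setTrust(1)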

View File

@@ -23,12 +23,12 @@ class PrivateEndpoints:
def __init__(self, client_api):
private_endpoints_bp = Blueprint('privateendpoints', __name__)
self.private_endpoints_bp = private_endpoints_bp
-config = client_api._core.config
+config = client_api.config
@private_endpoints_bp.route('/serviceactive/<pubkey>')
def serviceActive(pubkey):
try:
-if pubkey in client_api._core.onionrInst.communicatorInst.active_services:
+if pubkey in client_api.onionrInst.communicatorInst.active_services:
return Response('true')
except AttributeError as e:
pass
@@ -95,7 +95,7 @@ class PrivateEndpoints:
#return Response("disabled")
while True:
try:
-return Response(client_api._core.serializer.getStats())
+return Response(client_api.serializer.getStats())
except AttributeError:
pass
@@ -105,7 +105,7 @@
@private_endpoints_bp.route('/getActivePubkey')
def getActivePubkey():
-return Response(client_api._core._crypto.pubKey)
+return Response(onionrcrypto.OnionrCrypto().pubKey)
@private_endpoints_bp.route('/getHumanReadable/<name>')
def getHumanReadable(name):
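
The active public key now comes straight from an OnionrCrypto instance rather than client_api._core. A minimal sketch:

import onionrcrypto

# Our own public key, the same value served by the /getActivePubkey endpoint above
print(onionrcrypto.OnionrCrypto().pubKey)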

View File

@@ -18,14 +18,12 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from flask import Blueprint, Response, abort
-import core, onionrblockapi
+import onionrblockapi
from httpapi import apiutils
from onionrutils import stringvalidators
from coredb import blockmetadb
-c = core.Core()
-client_get_block = apiutils.GetBlockData(c)
+client_get_block = apiutils.GetBlockData()
client_get_blocks = Blueprint('miscclient', __name__)
@@ -39,7 +37,7 @@ def getBlockBodyData(name):
resp = ''
if stringvalidators.validate_hash(name):
try:
-resp = onionrblockapi.Block(name, decrypt=True, core=c).bcontent
+resp = onionrblockapi.Block(name, decrypt=True).bcontent
except TypeError:
pass
else:

View File

@@ -0,0 +1 @@
from . import insert

View File

@@ -0,0 +1,153 @@
import json
import logger, config, coredb, subprocesspow  # referenced later in this module; exact import form is assumed
import onionrexceptions, onionrcrypto, onionrproofs
from onionrusers import onionrusers
from onionrutils import bytesconverter, epoch, localcommand, stringvalidators
import storagecounter, filepaths, onionrvalues, onionrstorage
import onionrevents as events
from etc import powchoice
crypto = onionrcrypto.OnionrCrypto()
use_subprocess = powchoice.use_subprocess()
def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = {}, expire=None, disableForward=False):
'''
Inserts a block into the network
encryptType must be specified to encrypt a block
'''
requirements = onionrvalues.OnionrValues()
storage_counter = storagecounter.StorageCounter()
allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
if storage_counter.isFull():
logger.error(allocationReachedMessage)
return False
retData = False
if type(data) is None:
raise ValueError('Data cannot be none')
createTime = epoch.get_epoch()
dataNonce = bytesconverter.bytes_to_str(crypto.sha3Hash(data))
try:
with open(filepaths.data_nonce_file, 'r') as nonces:
if dataNonce in nonces:
return retData
except FileNotFoundError:
pass
# record nonce
with open(filepaths.data_nonce_file, 'a') as nonceFile:
nonceFile.write(dataNonce + '\n')
if type(data) is bytes:
data = data.decode()
data = str(data)
plaintext = data
plaintextMeta = {}
plaintextPeer = asymPeer
retData = ''
signature = ''
signer = ''
metadata = {}
# metadata is full block metadata, meta is internal, user specified metadata
# only use header if not set in provided meta
meta['type'] = str(header)
if encryptType in ('asym', 'sym', ''):
metadata['encryptType'] = encryptType
else:
raise onionrexceptions.InvalidMetadata('encryptType must be asym or sym, or blank')
try:
data = data.encode()
except AttributeError:
pass
if encryptType == 'asym':
meta['rply'] = createTime # Duplicate the time in encrypted messages to prevent replays
if not disableForward and sign and asymPeer != crypto.pubKey:
try:
forwardEncrypted = onionrusers.OnionrUser(asymPeer).forwardEncrypt(data)
data = forwardEncrypted[0]
meta['forwardEnc'] = True
expire = forwardEncrypted[2] # Expire time of key. no sense keeping block after that
except onionrexceptions.InvalidPubkey:
pass
#onionrusers.OnionrUser(self, asymPeer).generateForwardKey()
fsKey = onionrusers.OnionrUser(asymPeer).generateForwardKey()
#fsKey = onionrusers.OnionrUser(self, asymPeer).getGeneratedForwardKeys().reverse()
meta['newFSKey'] = fsKey
jsonMeta = json.dumps(meta)
plaintextMeta = jsonMeta
if sign:
signature = crypto.edSign(jsonMeta.encode() + data, key=crypto.privKey, encodeResult=True)
signer = crypto.pubKey
if len(jsonMeta) > 1000:
raise onionrexceptions.InvalidMetadata('meta in json encoded form must not exceed 1000 bytes')
user = onionrusers.OnionrUser(symKey)
# encrypt block metadata/sig/content
if encryptType == 'sym':
if len(symKey) < requirements.passwordLength:
raise onionrexceptions.SecurityError('Weak encryption key')
jsonMeta = crypto.symmetricEncrypt(jsonMeta, key=symKey, returnEncoded=True).decode()
data = crypto.symmetricEncrypt(data, key=symKey, returnEncoded=True).decode()
signature = crypto.symmetricEncrypt(signature, key=symKey, returnEncoded=True).decode()
signer = crypto.symmetricEncrypt(signer, key=symKey, returnEncoded=True).decode()
elif encryptType == 'asym':
if stringvalidators.validate_pub_key(asymPeer):
# Encrypt block data with forward secrecy key first, but not meta
jsonMeta = json.dumps(meta)
jsonMeta = crypto.pubKeyEncrypt(jsonMeta, asymPeer, encodedData=True).decode()
data = crypto.pubKeyEncrypt(data, asymPeer, encodedData=True).decode()
signature = crypto.pubKeyEncrypt(signature, asymPeer, encodedData=True).decode()
signer = crypto.pubKeyEncrypt(signer, asymPeer, encodedData=True).decode()
try:
onionrusers.OnionrUser(asymPeer, saveUser=True)
except ValueError:
# if peer is already known
pass
else:
raise onionrexceptions.InvalidPubkey(asymPeer + ' is not a valid base32 encoded ed25519 key')
# compile metadata
metadata['meta'] = jsonMeta
metadata['sig'] = signature
metadata['signer'] = signer
metadata['time'] = createTime
# ensure expire is integer and of sane length
if type(expire) is not type(None):
assert len(str(int(expire))) < 14
metadata['expire'] = expire
# send block data (and metadata) to POW module to get tokenized block data
if use_subprocess:
payload = subprocesspow.SubprocessPOW(data, metadata).start()
else:
payload = onionrproofs.POW(metadata, data).waitForResult()
if payload != False:
try:
retData = onionrstorage.setdata.set_data(data)
except onionrexceptions.DiskAllocationReached:
logger.error(allocationReachedMessage)
retData = False
else:
# Tell the api server through localCommand to wait for the daemon to upload this block to make statistical analysis more difficult
if localcommand.local_command('/ping', maxWait=10) == 'pong!':
if config.get('general.security_level', 1) == 0:
localcommand.local_command('/waitforshare/' + retData, post=True, maxWait=5)
coredb.daemonqueue.daemon_queue_add('uploadBlock', retData)
else:
pass
coredb.blockmetadb.add_to_block_DB(retData, selfInsert=True, dataSaved=True)
coredb.blockmetadata.process_block_metadata(retData)
'''
if retData != False:
if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
events.event('insertdeniable', {'content': plaintext, 'meta': plaintextMeta, 'hash': retData, 'peer': bytesconverter.bytes_to_str(asymPeer)}, onionr = self.onionrInst, threaded = True)
else:
events.event('insertblock', {'content': plaintext, 'meta': plaintextMeta, 'hash': retData, 'peer': bytesconverter.bytes_to_str(asymPeer)}, onionr = self.onionrInst, threaded = True)
'''
return retData
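
A minimal usage sketch for the new module-level insert path; the content and header values are illustrative, and a False return means the insert was rejected (duplicate nonce, disk allocation reached, or failed proof of work):

from onionrblocks import insert

block_hash = insert.insert_block('hello world', header='txt', sign=False)
if block_hash is not False:
    print('Inserted block:', block_hash)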