OnionrUtils fully removed (but not yet fully bug-free)
flow now uses a daemon thread for displaying output
parent 909c002dc4
commit c7e06205b7
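The commit applies one pattern throughout the diff below: helpers that used to live on the OnionrUtils instance (reached through core._utils) become plain functions in per-topic modules under onionrutils, and the storage counter moves off _utils onto Core itself as storage_counter. A minimal before/after sketch of the call style, using only functions that appear in this diff (epoch.get_epoch, stringvalidators.validate_hash, basicrequests.do_get_request); the variable names are placeholders:

# before: utility methods hung off an OnionrUtils object attached to Core
started = self._core._utils.getEpoch()
if self._core._utils.validateHash(block_hash):
    data = self._core._utils.doGetRequest(url, port=proxy_port)

# after: stateless functions imported from onionrutils submodules
from onionrutils import epoch, stringvalidators, basicrequests
started = epoch.get_epoch()
if stringvalidators.validate_hash(block_hash):
    # calls that need node state now take the Core instance as an explicit argument
    data = basicrequests.do_get_request(self._core, url, port=proxy_port)

This is why most hunks below touch only an import line plus a single call site.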
@@ -23,11 +23,12 @@ from gevent import Timeout
 import flask
 from flask import request, Response, abort, send_from_directory
 import core
-import onionrutils, onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config, onionrblockapi
+import onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config, onionrblockapi
 import httpapi
 from httpapi import friendsapi, profilesapi, configapi, miscpublicapi
 from onionrservices import httpheaders
 import onionr
+from onionrutils import bytesconverter, stringvalidators, epoch, mnemonickeys

 config.reload()
 class FDSafeHandler(WSGIHandler):
@@ -98,7 +99,7 @@ class PublicAPI:
 resp = httpheaders.set_default_onionr_http_headers(resp)
 # Network API version
 resp.headers['X-API'] = onionr.API_VERSION
-self.lastRequest = clientAPI._core._utils.getRoundedEpoch(roundS=5)
+self.lastRequest = epoch.get_rounded_epoch(roundS=5)
 return resp

 @app.route('/')
@@ -177,9 +178,8 @@ class API:

 self.debug = debug
 self._core = onionrInst.onionrCore
-self.startTime = self._core._utils.getEpoch()
+self.startTime = epoch.get_epoch()
 self._crypto = onionrcrypto.OnionrCrypto(self._core)
-self._utils = onionrutils.OnionrUtils(self._core)
 app = flask.Flask(__name__)
 bindPort = int(config.get('client.client.port', 59496))
 self.bindPort = bindPort
@@ -334,7 +334,7 @@ class API:
 @app.route('/getblockbody/<name>')
 def getBlockBodyData(name):
 resp = ''
-if self._core._utils.validateHash(name):
+if stringvalidators.validate_hash(name):
 try:
 resp = onionrblockapi.Block(name, decrypt=True).bcontent
 except TypeError:
@@ -346,7 +346,7 @@ class API:
 @app.route('/getblockdata/<name>')
 def getData(name):
 resp = ""
-if self._core._utils.validateHash(name):
+if stringvalidators.validate_hash(name):
 if name in self._core.getBlockList():
 try:
 resp = self.getBlockData(name, decrypt=True)
@@ -371,7 +371,7 @@ class API:
 def site(name):
 bHash = name
 resp = 'Not Found'
-if self._core._utils.validateHash(bHash):
+if stringvalidators.validate_hash(bHash):
 try:
 resp = onionrblockapi.Block(bHash).bcontent
 except onionrexceptions.NoDataAvailable:
@@ -432,7 +432,7 @@ class API:

 @app.route('/getHumanReadable/<name>')
 def getHumanReadable(name):
-return Response(self._core._utils.getHumanReadableID(name))
+return Response(mnemonickeys.get_human_readable_ID(name))

 @app.route('/insertblock', methods=['POST'])
 def insertBlock():
@@ -497,13 +497,13 @@ class API:
 def getUptime(self):
 while True:
 try:
-return self._utils.getEpoch() - self.startTime
+return epoch.get_epoch() - self.startTime
 except (AttributeError, NameError):
 # Don't error on race condition with startup
 pass

 def getBlockData(self, bHash, decrypt=False, raw=False, headerOnly=False):
-assert self._core._utils.validateHash(bHash)
+assert stringvalidators.validate_hash(bHash)
 bl = onionrblockapi.Block(bHash, core=self._core)
 if decrypt:
 bl.decrypt()
@@ -520,8 +520,8 @@ class API:
 pass
 else:
 validSig = False
-signer = onionrutils.bytes_to_str(bl.signer)
+signer = bytesconverter.bytes_to_str(bl.signer)
-if bl.isSigned() and onionrutils.stringvalidators.validate_pub_key(signer) and bl.isSigner(signer):
+if bl.isSigned() and stringvalidators.validate_pub_key(signer) and bl.isSigner(signer):
 validSig = True
 bl.bheader['validSig'] = validSig
 bl.bheader['meta'] = ''
@@ -27,7 +27,7 @@ from communicatorutils import downloadblocks, lookupblocks, lookupadders
 from communicatorutils import servicecreator, connectnewpeers, uploadblocks
 from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
 from communicatorutils import cooldownpeer, housekeeping, netcheck
-from onionrutils import localcommand
+from onionrutils import localcommand, epoch, basicrequests
 from etc import humanreadabletime
 import onionrservices, onionr, onionrproofs

@@ -91,7 +91,7 @@ class OnionrCommunicatorDaemon:
 plugins.reload()

 # time app started running for info/statistics purposes
-self.startTime = self._core._utils.getEpoch()
+self.startTime = epoch.get_epoch()

 if developmentMode:
 OnionrCommunicatorTimers(self, self.heartbeat, 30)
@@ -310,9 +310,9 @@ class OnionrCommunicatorDaemon:
 if len(data) > 0:
 url += '&data=' + data

-self._core.setAddressInfo(peer, 'lastConnectAttempt', self._core._utils.getEpoch()) # mark the time we're trying to request this peer
+self._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer

-retData = self._core._utils.doGetRequest(url, port=self.proxyPort)
+retData = basicrequests.do_get_request(self._core, url, port=self.proxyPort)
 # if request failed, (error), mark peer offline
 if retData == False:
 try:
@@ -324,7 +324,7 @@ class OnionrCommunicatorDaemon:
 except ValueError:
 pass
 else:
-self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch())
+self._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
 self.getPeerProfileInstance(peer).addScore(1)
 return retData # If returnHeaders, returns tuple of data, headers. if not, just data string

@@ -341,7 +341,7 @@ class OnionrCommunicatorDaemon:
 return retData

 def getUptime(self):
-return self._core._utils.getEpoch() - self.startTime
+return epoch.get_epoch() - self.startTime

 def heartbeat(self):
 '''Show a heartbeat debug message'''
@@ -20,6 +20,7 @@
 import base64
 import onionrproofs, logger
 from etc import onionrvalues
+from onionrutils import basicrequests

 def announce_node(daemon):
 '''Announce our node to our peers'''
@@ -75,8 +76,8 @@ def announce_node(daemon):
 daemon.announceCache[peer] = data['random']
 if not announceFail:
 logger.info('Announcing node to ' + url)
-if daemon._core._utils.doPostRequest(url, data) == 'Success':
+if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
-logger.info('Successfully introduced node to ' + peer)
+logger.info('Successfully introduced node to ' + peer, terminal=True)
 retData = True
 daemon._core.setAddressInfo(peer, 'introduced', 1)
 daemon._core.setAddressInfo(peer, 'powValue', data['random'])
@@ -20,7 +20,7 @@
 import time, sys
 import onionrexceptions, logger, onionrpeers
 from utils import networkmerger
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch
 # secrets module was added into standard lib in 3.6+
 if sys.version_info[0] == 3 and sys.version_info[1] < 6:
 from dependencies import secrets
@@ -75,7 +75,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
 if address not in comm_inst.onlinePeers:
 logger.info('Connected to ' + address, terminal=True)
 comm_inst.onlinePeers.append(address)
-comm_inst.connectTimes[address] = comm_inst._core._utils.getEpoch()
+comm_inst.connectTimes[address] = epoch.get_epoch()
 retData = address

 # add peer to profile list if they're not in it
@@ -17,6 +17,7 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+from onionrutils import epoch
 def cooldown_peer(comm_inst):
 '''Randomly add an online peer to cooldown, so we can connect a new one'''
 onlinePeerAmount = len(comm_inst.onlinePeers)
@@ -28,7 +29,7 @@ def cooldown_peer(comm_inst):
 # Remove peers from cooldown that have been there long enough
 tempCooldown = dict(comm_inst.cooldownPeer)
 for peer in tempCooldown:
-if (comm_inst._core._utils.getEpoch() - tempCooldown[peer]) >= cooldownTime:
+if (epoch.get_epoch() - tempCooldown[peer]) >= cooldownTime:
 del comm_inst.cooldownPeer[peer]

 # Cool down a peer, if we have max connections alive for long enough
@@ -38,7 +39,7 @@ def cooldown_peer(comm_inst):
 while finding:
 try:
 toCool = min(tempConnectTimes, key=tempConnectTimes.get)
-if (comm_inst._core._utils.getEpoch() - tempConnectTimes[toCool]) < minTime:
+if (epoch.get_epoch() - tempConnectTimes[toCool]) < minTime:
 del tempConnectTimes[toCool]
 else:
 finding = False
@@ -46,6 +47,6 @@ def cooldown_peer(comm_inst):
 break
 else:
 comm_inst.removeOnlinePeer(toCool)
-comm_inst.cooldownPeer[toCool] = comm_inst._core._utils.getEpoch()
+comm_inst.cooldownPeer[toCool] = epoch.get_epoch()

 comm_inst.decrementThreadCount('cooldown_peer')
@@ -19,7 +19,7 @@
 '''
 import communicator, onionrexceptions
 import logger, onionrpeers
-from onionrutils import blockmetadata
+from onionrutils import blockmetadata, stringvalidators, validatemetadata

 def download_blocks_from_communicator(comm_inst):
 assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
@@ -48,7 +48,7 @@ def download_blocks_from_communicator(comm_inst):
 continue
 if comm_inst._core._blacklist.inBlacklist(blockHash):
 continue
-if comm_inst._core._utils.storageCounter.isFull():
+if comm_inst._core.storage_counter.isFull():
 break
 comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
 if len(blockPeers) == 0:
@@ -75,7 +75,7 @@ def download_blocks_from_communicator(comm_inst):
 content = content.decode() # decode here because sha3Hash needs bytes above
 metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
 metadata = metas[0]
-if comm_inst._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
+if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
 if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
 logger.info('Attempting to save block %s...' % blockHash[:12])
 try:
@@ -20,6 +20,7 @@
 import sqlite3
 import logger
 from onionrusers import onionrusers
+from onionrutils import epoch
 def clean_old_blocks(comm_inst):
 '''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''

@@ -29,7 +30,7 @@ def clean_old_blocks(comm_inst):
 comm_inst._core.removeBlock(bHash)
 logger.info('Deleted block: %s' % (bHash,))

-while comm_inst._core._utils.storageCounter.isFull():
+while comm_inst._core.storage_counter.isFull():
 oldest = comm_inst._core.getBlockList()[0]
 comm_inst._core._blacklist.addToDB(oldest)
 comm_inst._core.removeBlock(oldest)
@@ -41,7 +42,7 @@ def clean_keys(comm_inst):
 '''Delete expired forward secrecy keys'''
 conn = sqlite3.connect(comm_inst._core.peerDB, timeout=10)
 c = conn.cursor()
-time = comm_inst._core._utils.getEpoch()
+time = epoch.get_epoch()
 deleteKeys = []

 for entry in c.execute("SELECT * FROM forwardKeys WHERE expire <= ?", (time,)):
@@ -18,6 +18,8 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import logger, onionrproofs
+from onionrutils import stringvalidators, epoch
+
 def lookup_blocks_from_communicator(comm_inst):
 logger.info('Looking up new blocks...')
 tryAmount = 2
@@ -34,7 +36,7 @@ def lookup_blocks_from_communicator(comm_inst):
 if not comm_inst.isOnline:
 break
 # check if disk allocation is used
-if comm_inst._core._utils.storageCounter.isFull():
+if comm_inst._core.storage_counter.isFull():
 logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
 break
 peer = comm_inst.pickOnlinePeer() # select random online peer
@@ -60,11 +62,11 @@ def lookup_blocks_from_communicator(comm_inst):
 logger.warn('Could not get new blocks from %s.' % peer, error = error)
 newBlocks = False
 else:
-comm_inst.dbTimestamps[peer] = comm_inst._core._utils.getRoundedEpoch(roundS=60)
+comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
 if newBlocks != False:
 # if request was a success
 for i in newBlocks.split('\n'):
-if comm_inst._core._utils.validateHash(i):
+if stringvalidators.validate_hash(i):
 # if newline seperated string is valid hash
 if not i in existingBlocks:
 # if block does not exist on disk and is not already in block queue
@@ -20,14 +20,14 @@
 '''
 import logger
 from utils import netutils
-from onionrutils import localcommand
+from onionrutils import localcommand, epoch
 def net_check(comm_inst):
 '''Check if we are connected to the internet or not when we can't connect to any peers'''
 rec = False # for detecting if we have received incoming connections recently
 c = comm_inst._core
 if len(comm_inst.onlinePeers) == 0:
 try:
-if (c._utils.getEpoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
+if (epoch.get_epoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
 comm_inst.isOnline = True
 rec = True
 except ValueError:
@@ -20,16 +20,17 @@
 import logger
 from communicatorutils import proxypicker
 import onionrblockapi as block
-from onionrutils import localcommand
+from onionrutils import localcommand, stringvalidators, basicrequests

 def upload_blocks_from_communicator(comm_inst):
 # when inserting a block, we try to upload it to a few peers to add some deniability
 triedPeers = []
 finishedUploads = []
-comm_inst.blocksToUpload = comm_inst._core._crypto.randomShuffle(comm_inst.blocksToUpload)
+core = comm_inst._core
+comm_inst.blocksToUpload = core._crypto.randomShuffle(comm_inst.blocksToUpload)
 if len(comm_inst.blocksToUpload) != 0:
 for bl in comm_inst.blocksToUpload:
-if not comm_inst._core._utils.validateHash(bl):
+if not stringvalidators.validate_hash(bl):
 logger.warn('Requested to upload invalid block')
 comm_inst.decrementThreadCount('uploadBlock')
 return
@@ -42,8 +43,8 @@ def upload_blocks_from_communicator(comm_inst):
 data = {'block': block.Block(bl).getRaw()}
 proxyType = proxypicker.pick_proxy(peer)
 logger.info("Uploading block to " + peer)
-if not comm_inst._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
+if not basicrequests.do_post_request(core, url, data=data, proxyType=proxyType) == False:
-localcommand.local_command(comm_inst._core, 'waitforshare/' + bl, post=True)
+localcommand.local_command(core, 'waitforshare/' + bl, post=True)
 finishedUploads.append(bl)
 for x in finishedUploads:
 try:
onionr/core.py (131 changed lines)
@@ -30,6 +30,7 @@ import dbcreator, onionrstorage, serializeddata, subprocesspow
 from etc import onionrvalues, powchoice
 from onionrutils import localcommand, stringvalidators, bytesconverter, epoch
 from onionrutils import blockmetadata
+import storagecounter

 class Core:
 def __init__(self, torPort=0):
@@ -41,76 +42,76 @@ class Core:
 if not self.dataDir.endswith('/'):
 self.dataDir += '/'

-try:
+#try:
-self.onionrInst = None
+self.usageFile = self.dataDir + 'disk-usage.txt'
-self.queueDB = self.dataDir + 'queue.db'
+self.config = config
-self.peerDB = self.dataDir + 'peers.db'
+self.maxBlockSize = 10000000 # max block size in bytes
-self.blockDB = self.dataDir + 'blocks.db'
-self.blockDataLocation = self.dataDir + 'blocks/'
-self.blockDataDB = self.blockDataLocation + 'block-data.db'
-self.publicApiHostFile = self.dataDir + 'public-host.txt'
-self.privateApiHostFile = self.dataDir + 'private-host.txt'
-self.addressDB = self.dataDir + 'address.db'
-self.hsAddress = ''
-self.i2pAddress = config.get('i2p.own_addr', None)
-self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
-self.bootstrapList = []
-self.requirements = onionrvalues.OnionrValues()
-self.torPort = torPort
-self.dataNonceFile = self.dataDir + 'block-nonces.dat'
-self.dbCreate = dbcreator.DBCreator(self)
-self.forwardKeysFile = self.dataDir + 'forward-keys.db'
-self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)

-# Socket data, defined here because of multithreading constraints with gevent
+self.onionrInst = None
-self.killSockets = False
+self.queueDB = self.dataDir + 'queue.db'
-self.startSocket = {}
+self.peerDB = self.dataDir + 'peers.db'
-self.socketServerConnData = {}
+self.blockDB = self.dataDir + 'blocks.db'
-self.socketReasons = {}
+self.blockDataLocation = self.dataDir + 'blocks/'
-self.socketServerResponseData = {}
+self.blockDataDB = self.blockDataLocation + 'block-data.db'
+self.publicApiHostFile = self.dataDir + 'public-host.txt'
+self.privateApiHostFile = self.dataDir + 'private-host.txt'
+self.addressDB = self.dataDir + 'address.db'
+self.hsAddress = ''
+self.i2pAddress = config.get('i2p.own_addr', None)
+self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
+self.bootstrapList = []
+self.requirements = onionrvalues.OnionrValues()
+self.torPort = torPort
+self.dataNonceFile = self.dataDir + 'block-nonces.dat'
+self.dbCreate = dbcreator.DBCreator(self)
+self.forwardKeysFile = self.dataDir + 'forward-keys.db'
+self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)
+self.storage_counter = storagecounter.StorageCounter(self)

-self.usageFile = self.dataDir + 'disk-usage.txt'
+# Socket data, defined here because of multithreading constraints with gevent
-self.config = config
+self.killSockets = False
+self.startSocket = {}
+self.socketServerConnData = {}
+self.socketReasons = {}
+self.socketServerResponseData = {}

-self.maxBlockSize = 10000000 # max block size in bytes
+if not os.path.exists(self.dataDir):
+os.mkdir(self.dataDir)
+if not os.path.exists(self.dataDir + 'blocks/'):
+os.mkdir(self.dataDir + 'blocks/')
+if not os.path.exists(self.blockDB):
+self.createBlockDB()
+if not os.path.exists(self.forwardKeysFile):
+self.dbCreate.createForwardKeyDB()
+if not os.path.exists(self.peerDB):
+self.createPeerDB()
+if not os.path.exists(self.addressDB):
+self.createAddressDB()

-if not os.path.exists(self.dataDir):
+if os.path.exists(self.dataDir + '/hs/hostname'):
-os.mkdir(self.dataDir)
+with open(self.dataDir + '/hs/hostname', 'r') as hs:
-if not os.path.exists(self.dataDir + 'blocks/'):
+self.hsAddress = hs.read().strip()
-os.mkdir(self.dataDir + 'blocks/')
-if not os.path.exists(self.blockDB):
-self.createBlockDB()
-if not os.path.exists(self.forwardKeysFile):
-self.dbCreate.createForwardKeyDB()
-if not os.path.exists(self.peerDB):
-self.createPeerDB()
-if not os.path.exists(self.addressDB):
-self.createAddressDB()

-if os.path.exists(self.dataDir + '/hs/hostname'):
+# Load bootstrap address list
-with open(self.dataDir + '/hs/hostname', 'r') as hs:
+if os.path.exists(self.bootstrapFileLocation):
-self.hsAddress = hs.read().strip()
+with open(self.bootstrapFileLocation, 'r') as bootstrap:
+bootstrap = bootstrap.read()
+for i in bootstrap.split('\n'):
+self.bootstrapList.append(i)
+else:
+logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)

-# Load bootstrap address list
+self.use_subprocess = powchoice.use_subprocess(self)
-if os.path.exists(self.bootstrapFileLocation):
+# Initialize the crypto object
-with open(self.bootstrapFileLocation, 'r') as bootstrap:
+self._crypto = onionrcrypto.OnionrCrypto(self)
-bootstrap = bootstrap.read()
+self._blacklist = onionrblacklist.OnionrBlackList(self)
-for i in bootstrap.split('\n'):
+self.serializer = serializeddata.SerializedData(self)
-self.bootstrapList.append(i)
-else:
-logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)

-self.use_subprocess = powchoice.use_subprocess(self)
+# except Exception as error:
-self._utils = onionrutils.OnionrUtils(self)
+# print(str(error))
-# Initialize the crypto object
+# logger.error('Failed to initialize core Onionr library.', error=error, terminal=True)
-self._crypto = onionrcrypto.OnionrCrypto(self)
+# logger.fatal('Cannot recover from error.', terminal=True)
-self._blacklist = onionrblacklist.OnionrBlackList(self)
+# sys.exit(1)
-self.serializer = serializeddata.SerializedData(self)

-except Exception as error:
-logger.error('Failed to initialize core Onionr library.', error=error)
-logger.fatal('Cannot recover from error.')
-sys.exit(1)
 return

 def refreshFirstStartVars(self):
@@ -313,7 +314,7 @@ class Core:
 encryptType must be specified to encrypt a block
 '''
 allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
-if self._utils.storageCounter.isFull():
+if self.storage_counter.isFull():
 logger.error(allocationReachedMessage)
 return False
 retData = False
@@ -439,7 +440,7 @@ class Core:
 localcommand.local_command(self, '/waitforshare/' + retData, post=True, maxWait=5)
 self.daemonQueueAdd('uploadBlock', retData)
 self.addToBlockDB(retData, selfInsert=True, dataSaved=True)
-blockmetadata.process_block_metadata(retData)
+blockmetadata.process_block_metadata(self, retData)

 if retData != False:
 if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
@@ -1,5 +1,5 @@
 import os, sqlite3
-import onionrutils
+from onionrutils import epoch, blockmetadata
 def add_to_block_DB(core_inst, newHash, selfInsert=False, dataSaved=False):
 '''
 Add a hash value to the block db
@@ -9,11 +9,11 @@ def add_to_block_DB(core_inst, newHash, selfInsert=False, dataSaved=False):

 if not os.path.exists(core_inst.blockDB):
 raise Exception('Block db does not exist')
-if onionrutils.has_block(core_inst, newHash):
+if blockmetadata.has_block(core_inst, newHash):
 return
 conn = sqlite3.connect(core_inst.blockDB, timeout=30)
 c = conn.cursor()
-currentTime = core_inst._utils.getEpoch() + core_inst._crypto.secrets.randbelow(301)
+currentTime = epoch.get_epoch() + core_inst._crypto.secrets.randbelow(301)
 if selfInsert or dataSaved:
 selfInsert = 1
 else:
@@ -1,9 +1,10 @@
 import sqlite3
+from onionrutils import epoch
 def get_expired_blocks(core_inst):
 '''Returns a list of expired blocks'''
 conn = sqlite3.connect(core_inst.blockDB, timeout=30)
 c = conn.cursor()
-date = int(core_inst._utils.getEpoch())
+date = int(epoch.get_epoch())

 execute = 'SELECT hash FROM hashes WHERE expire <= %s ORDER BY dateReceived;' % (date,)

@@ -1,6 +1,6 @@
 import sqlite3, os
 import onionrevents as events
-from onionrutils import localcommand
+from onionrutils import localcommand, epoch

 def daemon_queue(core_inst):
 '''
@@ -38,7 +38,7 @@ def daemon_queue_add(core_inst, command, data='', responseID=''):

 retData = True

-date = core_inst._utils.getEpoch()
+date = epoch.get_epoch()
 conn = sqlite3.connect(core_inst.queueDB, timeout=30)
 c = conn.cursor()
 t = (command, data, date, responseID)
@@ -1,5 +1,6 @@
 import sqlite3
 import logger
+from onionrutils import epoch
 def list_peers(core_inst, randomOrder=True, getPow=False, trust=0):
 '''
 Return a list of public keys (misleading function name)
@@ -56,7 +57,7 @@ def list_adders(core_inst, randomOrder=True, i2p=True, recent=0):
 testList = list(addressList) # create new list to iterate
 for address in testList:
 try:
-if recent > 0 and (core_inst._utils.getEpoch() - core_inst.getAddressInfo(address, 'lastConnect')) > recent:
+if recent > 0 and (epoch.get_epoch() - core_inst.getAddressInfo(address, 'lastConnect')) > recent:
 raise TypeError # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
 except TypeError:
 addressList.remove(address)
@@ -18,7 +18,8 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 from flask import Response, abort
-import config, onionrutils
+import config
+from onionrutils import bytesconverter, stringvalidators
 def get_public_block_list(clientAPI, publicAPI, request):
 # Provide a list of our blocks, with a date offset
 dateAdjust = request.args.get('date')
@@ -33,7 +34,7 @@ def get_public_block_list(clientAPI, publicAPI, request):
 def get_block_data(clientAPI, publicAPI, data):
 '''data is the block hash in hex'''
 resp = ''
-if clientAPI._utils.validateHash(data):
+if stringvalidators.validate_hash(data):
 if not clientAPI._core.config.get('general.hide_created_blocks', True) or data not in publicAPI.hideBlocks:
 if data in clientAPI._core.getBlockList():
 block = clientAPI.getBlockData(data, raw=True)
@@ -41,7 +42,7 @@ def get_block_data(clientAPI, publicAPI, data):
 block = block.encode() # Encode in case data is binary
 except AttributeError:
 abort(404)
-block = onionrutils.str_to_bytes(block)
+block = bytesconverter.str_to_bytes(block)
 resp = block
 if len(resp) == 0:
 abort(404)
@@ -17,20 +17,20 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+from onionrutils import bytesconverter
 import onionrcrypto
 class KeyManager:
 def __init__(self, crypto):
 assert isinstance(crypto, onionrcrypto.OnionrCrypto)
 self._core = crypto._core
-self._utils = self._core._utils
 self.keyFile = crypto._keyFile
 self.crypto = crypto

 def addKey(self, pubKey=None, privKey=None):
 if type(pubKey) is type(None) and type(privKey) is type(None):
 pubKey, privKey = self.crypto.generatePubKey()
-pubKey = self.crypto._core._utils.bytesToStr(pubKey)
+pubKey = bytesconverter.bytes_to_str(pubKey)
-privKey = self.crypto._core._utils.bytesToStr(privKey)
+privKey = bytesconverter.bytes_to_str(privKey)
 try:
 if pubKey in self.getPubkeyList():
 raise ValueError('Pubkey already in list: %s' % (pubKey,))
@@ -32,7 +32,6 @@ if sys.version_info[0] == 2 or sys.version_info[1] < MIN_PY_VERSION:
 import os, base64, random, shutil, time, platform, signal
 from threading import Thread
 import api, core, config, logger, onionrplugins as plugins, onionrevents as events
-import onionrutils
 import netcontroller
 from netcontroller import NetController
 from onionrblockapi import Block
@@ -51,6 +50,7 @@ class Onionr:
 Main Onionr class. This is for the CLI program, and does not handle much of the logic.
 In general, external programs and plugins should not use this class.
 '''
+self.API_VERSION = API_VERSION
 self.userRunDir = os.getcwd() # Directory user runs the program from
 self.killed = False

@@ -18,6 +18,7 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import sqlite3, os, logger
+from onionrutils import epoch, bytesconverter
 class OnionrBlackList:
 def __init__(self, coreInst):
 self.blacklistDB = coreInst.dataDir + 'blacklist.db'
@@ -28,7 +29,7 @@ class OnionrBlackList:
 return

 def inBlacklist(self, data):
-hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
+hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
 retData = False

 if not hashed.isalnum():
@@ -56,7 +57,7 @@
 def deleteExpired(self, dataType=0):
 '''Delete expired entries'''
 deleteList = []
-curTime = self._core._utils.getEpoch()
+curTime = epoch.get_epoch()

 try:
 int(dataType)
@@ -98,7 +99,7 @@
 2=pubkey
 '''
 # we hash the data so we can remove data entirely from our node's disk
-hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
+hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
 if len(hashed) > 64:
 raise Exception("Hashed data is too large")

@@ -115,7 +116,7 @@
 if self.inBlacklist(hashed):
 return
 insert = (hashed,)
-blacklistDate = self._core._utils.getEpoch()
+blacklistDate = epoch.get_epoch()
 try:
 self._dbExecute("INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);", (str(hashed), dataType, blacklistDate, expire))
 except sqlite3.IntegrityError:
@@ -21,7 +21,7 @@
 import core as onionrcore, logger, config, onionrexceptions, nacl.exceptions
 import json, os, sys, datetime, base64, onionrstorage
 from onionrusers import onionrusers
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch

 class Block:
 blockCacheOrder = list() # NEVER write your own code that writes to this!
@@ -89,7 +89,7 @@ class Block:

 # Check for replay attacks
 try:
-if self.core._utils.getEpoch() - self.core.getBlockDate(self.hash) < 60:
+if epoch.get_epoch() - self.core.getBlockDate(self.hash) < 60:
 assert self.core._crypto.replayTimestampValidation(self.bmetadata['rply'])
 except (AssertionError, KeyError, TypeError) as e:
 if not self.bypassReplayCheck:
@@ -19,12 +19,13 @@
 '''
 import sys
 import logger
+from onionrutils import stringvalidators
 def ban_block(o_inst):
 try:
 ban = sys.argv[2]
 except IndexError:
 ban = logger.readline('Enter a block hash:')
-if o_inst.onionrUtils.validateHash(ban):
+if stringvalidators.validate_hash(ban):
 if not o_inst.onionrCore._blacklist.inBlacklist(ban):
 try:
 o_inst.onionrCore._blacklist.addToDB(ban)
@@ -19,6 +19,7 @@
 '''
 import sys, os
 import logger, onionrstorage
+from onionrutils import stringvalidators
 def doExport(o_inst, bHash):
 exportDir = o_inst.dataDir + 'block-export/'
 if not os.path.exists(exportDir):
@@ -34,7 +35,7 @@ def doExport(o_inst, bHash):
 def export_block(o_inst):
 exportDir = o_inst.dataDir + 'block-export/'
 try:
-assert o_inst.onionrUtils.validateHash(sys.argv[2])
+assert stringvalidators.validate_hash(sys.argv[2])
 except (IndexError, AssertionError):
 logger.error('No valid block hash specified.', terminal=True)
 sys.exit(1)
@@ -21,6 +21,7 @@
 import base64, sys, os
 import logger
 from onionrblockapi import Block
+from onionrutils import stringvalidators
 def add_file(o_inst, singleBlock=False, blockType='bin'):
 '''
 Adds a file to the onionr network
@@ -60,7 +61,7 @@ def getFile(o_inst):
 if os.path.exists(fileName):
 logger.error("File already exists", terminal=True)
 return
-if not o_inst.onionrUtils.validateHash(bHash):
+if not stringvalidators.validate_hash(bHash):
 logger.error('Block hash is invalid', terminal=True)
 return

@@ -21,7 +21,7 @@ import os, binascii, base64, hashlib, time, sys, hmac, secrets
 import nacl.signing, nacl.encoding, nacl.public, nacl.hash, nacl.pwhash, nacl.utils, nacl.secret
 import unpaddedbase32
 import logger, onionrproofs
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch, bytesconverter
 import onionrexceptions, keymanager, core, onionrutils
 import config
 config.reload()
@@ -95,10 +95,10 @@ class OnionrCrypto:

 def pubKeyEncrypt(self, data, pubkey, encodedData=False):
 '''Encrypt to a public key (Curve25519, taken from base32 Ed25519 pubkey)'''
-pubkey = unpaddedbase32.repad(onionrutils.str_to_bytes(pubkey))
+pubkey = unpaddedbase32.repad(bytesconverter.str_to_bytes(pubkey))
 retVal = ''
 box = None
-data = onionrutils.str_to_bytes(data)
+data = bytesconverter.str_to_bytes(data)

 pubkey = nacl.signing.VerifyKey(pubkey, encoder=nacl.encoding.Base32Encoder()).to_curve25519_public_key()

@@ -182,7 +182,7 @@ class OnionrCrypto:
 def generateDeterministic(self, passphrase, bypassCheck=False):
 '''Generate a Ed25519 public key pair from a password'''
 passStrength = self.deterministicRequirement
-passphrase = onionrutils.str_to_bytes(passphrase) # Convert to bytes if not already
+passphrase = bytesconverter.str_to_bytes(passphrase) # Convert to bytes if not already
 # Validate passphrase length
 if not bypassCheck:
 if len(passphrase) < passStrength:
@@ -202,7 +202,7 @@ class OnionrCrypto:
 if pubkey == '':
 pubkey = self.pubKey
 prev = ''
-pubkey = onionrutils.str_to_bytes(pubkey)
+pubkey = bytesconverter.str_to_bytes(pubkey)
 for i in range(self.HASH_ID_ROUNDS):
 try:
 prev = prev.encode()
@@ -266,7 +266,7 @@ class OnionrCrypto:

 @staticmethod
 def replayTimestampValidation(timestamp):
-if core.Core()._utils.getEpoch() - int(timestamp) > 2419200:
+if epoch.get_epoch() - int(timestamp) > 2419200:
 return False
 else:
 return True
@@ -19,6 +19,7 @@
 '''
 import sqlite3
 import core, config, logger
+from onionrutils import epoch
 config.reload()
 class PeerProfiles:
 '''
@@ -106,7 +107,7 @@ def peerCleanup(coreInst):
 if PeerProfiles(address, coreInst).score < minScore:
 coreInst.removeAddress(address)
 try:
-if (int(coreInst._utils.getEpoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
+if (int(epoch.get_epoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
 expireTime = 600
 else:
 expireTime = 86400
@@ -170,9 +170,6 @@ class pluginapi:
 def get_core(self):
 return self.core

-def get_utils(self):
-return self.get_core()._utils
-
 def get_crypto(self):
 return self.get_core()._crypto

@@ -30,10 +30,7 @@ def getDifficultyModifier(coreOrUtilsInst=None):
 '''
 classInst = coreOrUtilsInst
 retData = 0
-if isinstance(classInst, core.Core):
+useFunc = classInst.storage_counter.getPercent
-useFunc = classInst._utils.storageCounter.getPercent
-else:
-useFunc = core.Core()._utils.storageCounter.getPercent

 percentUse = useFunc()

@ -21,7 +21,7 @@ import time
|
|||||||
import stem
|
import stem
|
||||||
import core
|
import core
|
||||||
from . import connectionserver, bootstrapservice
|
from . import connectionserver, bootstrapservice
|
||||||
from onionrutils import stringvalidators
|
from onionrutils import stringvalidators, basicrequests
|
||||||
|
|
||||||
class OnionrServices:
|
class OnionrServices:
|
||||||
'''
|
'''
|
||||||
@ -47,7 +47,7 @@ class OnionrServices:
|
|||||||
base_url = 'http://%s/' % (address,)
|
base_url = 'http://%s/' % (address,)
|
||||||
socks = self._core.config.get('tor.socksport')
|
socks = self._core.config.get('tor.socksport')
|
||||||
for x in range(BOOTSTRAP_TRIES):
|
for x in range(BOOTSTRAP_TRIES):
|
||||||
if self._core._utils.doGetRequest(base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
|
if basicrequests.do_get_request(self._core, base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
|
||||||
# if bootstrap sever is online, tell them our service address
|
# if bootstrap sever is online, tell them our service address
|
||||||
connectionserver.ConnectionServer(peer, address, core_inst=self._core)
|
connectionserver.ConnectionServer(peer, address, core_inst=self._core)
|
||||||
else:
|
else:
|
||||||
|
@ -24,7 +24,7 @@ from flask import Flask, Response
|
|||||||
import core
|
import core
|
||||||
from netcontroller import getOpenPort
|
from netcontroller import getOpenPort
|
||||||
from . import httpheaders
|
from . import httpheaders
|
||||||
from onionrutils import stringvalidators
|
from onionrutils import stringvalidators, epoch
|
||||||
|
|
||||||
def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
|
def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
|
||||||
'''
|
'''
|
||||||
@ -77,7 +77,7 @@ def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
|
|||||||
# Create the v3 onion service
|
# Create the v3 onion service
|
||||||
response = controller.create_ephemeral_hidden_service({80: bootstrap_port}, key_type = 'NEW', key_content = 'ED25519-V3', await_publication = True)
|
response = controller.create_ephemeral_hidden_service({80: bootstrap_port}, key_type = 'NEW', key_content = 'ED25519-V3', await_publication = True)
|
||||||
core_inst.insertBlock(response.service_id, header='con', sign=True, encryptType='asym',
|
core_inst.insertBlock(response.service_id, header='con', sign=True, encryptType='asym',
|
||||||
asymPeer=peer, disableForward=True, expire=(core_inst._utils.getEpoch() + bootstrap_timeout))
|
asymPeer=peer, disableForward=True, expire=(epoch.get_epoch() + bootstrap_timeout))
|
||||||
# Run the bootstrap server
|
# Run the bootstrap server
|
||||||
try:
|
try:
|
||||||
http_server.serve_forever()
|
http_server.serve_forever()
|
||||||
|
@@ -24,7 +24,7 @@ import core, logger, httpapi
 import onionrexceptions
 from netcontroller import getOpenPort
 import api
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, basicrequests
 from . import httpheaders

 class ConnectionServer:
@@ -72,7 +72,7 @@ class ConnectionServer:

         try:
             for x in range(3):
-                attempt = self.core_inst._utils.doPostRequest('http://' + address + '/bs/' + response.service_id, port=socks)
+                attempt = basicrequests.do_post_request(self.core_inst, 'http://' + address + '/bs/' + response.service_id, port=socks)
                 if attempt == 'success':
                     break
             else:
@@ -18,7 +18,7 @@
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import core, sys, sqlite3, os, dbcreator, onionrexceptions
-from onionrutils import bytesconverter
+from onionrutils import bytesconverter, stringvalidators

 DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option

@@ -66,7 +66,7 @@ def deleteBlock(coreInst, blockHash):

 def store(coreInst, data, blockHash=''):
     assert isinstance(coreInst, core.Core)
-    assert coreInst._utils.validateHash(blockHash)
+    assert stringvalidators.validate_hash(blockHash)
     ourHash = coreInst._crypto.sha3Hash(data)
     if blockHash != '':
         assert ourHash == blockHash
@@ -81,7 +81,7 @@ def store(coreInst, data, blockHash=''):

 def getData(coreInst, bHash):
     assert isinstance(coreInst, core.Core)
-    assert coreInst._utils.validateHash(bHash)
+    assert stringvalidators.validate_hash(bHash)

     bHash = bytesconverter.bytes_to_str(bHash)

@@ -1,5 +1,6 @@
 import sys, sqlite3
 import onionrexceptions, onionrstorage
+from onionrutils import stringvalidators
 def remove_block(core_inst, block):
     '''
         remove a block from this node (does not automatically blacklist)
@@ -7,7 +8,7 @@ def remove_block(core_inst, block):
         **You may want blacklist.addToDB(blockHash)
     '''

-    if core_inst._utils.validateHash(block):
+    if stringvalidators.validate_hash(block):
         conn = sqlite3.connect(core_inst.blockDB, timeout=30)
         c = conn.cursor()
         t = (block,)
@@ -15,6 +16,6 @@ def remove_block(core_inst, block):
         conn.commit()
         conn.close()
         dataSize = sys.getsizeof(onionrstorage.getData(core_inst, block))
-        core_inst._utils.storageCounter.removeBytes(dataSize)
+        core_inst.storage_counter.removeBytes(dataSize)
     else:
         raise onionrexceptions.InvalidHexHash
@@ -19,7 +19,7 @@ def set_data(core_inst, data):
     try:
         onionrstorage.getData(core_inst, dataHash)
     except onionrexceptions.NoDataAvailable:
-        if core_inst._utils.storageCounter.addBytes(dataSize) != False:
+        if core_inst.storage_counter.addBytes(dataSize) != False:
             onionrstorage.store(core_inst, data, blockHash=dataHash)
             conn = sqlite3.connect(core_inst.blockDB, timeout=30)
             c = conn.cursor()
@@ -20,7 +20,7 @@
 import os, json, onionrexceptions
 import unpaddedbase32
 from onionrusers import onionrusers
-from onionrutils import bytesconverter
+from onionrutils import bytesconverter, epoch

 class ContactManager(onionrusers.OnionrUser):
     def __init__(self, coreInst, publicKey, saveUser=False, recordExpireSeconds=5):
@@ -42,7 +42,7 @@ class ContactManager(onionrusers.OnionrUser):
             dataFile.write(data)

     def _loadData(self):
-        self.lastRead = self._core._utils.getEpoch()
+        self.lastRead = epoch.get_epoch()
         retData = {}
         if os.path.exists(self.dataFile):
             with open(self.dataFile, 'r') as dataFile:
@@ -62,7 +62,7 @@ class ContactManager(onionrusers.OnionrUser):
         if self.deleted:
             raise onionrexceptions.ContactDeleted

-        if (self._core._utils.getEpoch() - self.lastRead >= self.recordExpire) or forceReload:
+        if (epoch.get_epoch() - self.lastRead >= self.recordExpire) or forceReload:
             self.data = self._loadData()
         try:
             return self.data[key]
@@ -18,8 +18,7 @@
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import logger, onionrexceptions, json, sqlite3, time
-from onionrutils import stringvalidators, bytesconverter
+from onionrutils import stringvalidators, bytesconverter, epoch

 import unpaddedbase32
 import nacl.exceptions

@@ -28,7 +27,7 @@ def deleteExpiredKeys(coreInst):
     conn = sqlite3.connect(coreInst.forwardKeysFile, timeout=10)
     c = conn.cursor()

-    curTime = coreInst._utils.getEpoch()
+    curTime = epoch.get_epoch()
     c.execute("DELETE from myForwardKeys where expire <= ?", (curTime,))
     conn.commit()
     conn.execute("VACUUM")
@@ -40,7 +39,7 @@ def deleteTheirExpiredKeys(coreInst, pubkey):
     c = conn.cursor()

     # Prepare the insert
-    command = (pubkey, coreInst._utils.getEpoch())
+    command = (pubkey, epoch.get_epoch())

     c.execute("DELETE from forwardKeys where peerKey = ? and expire <= ?", command)

@@ -160,10 +159,10 @@ class OnionrUser:
         conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
         c = conn.cursor()
         # Prepare the insert
-        time = self._core._utils.getEpoch()
+        time = epoch.get_epoch()
         newKeys = self._core._crypto.generatePubKey()
-        newPub = self._core._utils.bytesToStr(newKeys[0])
-        newPriv = self._core._utils.bytesToStr(newKeys[1])
+        newPub = bytesconverter.bytes_to_str(newKeys[0])
+        newPriv = bytesconverter.bytes_to_str(newKeys[1])

         command = (self.publicKey, newPub, newPriv, time, expire + time)

@@ -178,7 +177,7 @@ class OnionrUser:
         conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
         c = conn.cursor()
         pubkey = self.publicKey
-        pubkey = self._core._utils.bytesToStr(pubkey)
+        pubkey = bytesconverter.bytes_to_str(pubkey)
         command = (pubkey,)
         keyList = [] # list of tuples containing pub, private for peer

@@ -192,7 +191,7 @@ class OnionrUser:
         return list(keyList)

     def addForwardKey(self, newKey, expire=DEFAULT_KEY_EXPIRE):
-        newKey = self._core._utils.bytesToStr(unpaddedbase32.repad(bytesconverter.str_to_bytes(newKey)))
+        newKey = bytesconverter.bytes_to_str(unpaddedbase32.repad(bytesconverter.str_to_bytes(newKey)))
         if not stringvalidators.validate_pub_key(newKey):
             # Do not add if something went wrong with the key
             raise onionrexceptions.InvalidPubkey(newKey)
@@ -201,7 +200,7 @@ class OnionrUser:
         c = conn.cursor()

         # Get the time we're inserting the key at
-        timeInsert = self._core._utils.getEpoch()
+        timeInsert = epoch.get_epoch()

         # Look at our current keys for duplicate key data or time
         for entry in self._getForwardKeys():
@@ -1,120 +0,0 @@
-'''
-    Onionr - Private P2P Communication
-
-    OnionrUtils offers various useful functions to Onionr. Relatively misc.
-'''
-'''
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program. If not, see <https://www.gnu.org/licenses/>.
-'''
-# Misc functions that do not fit in the main api, but are useful
-import sys, os, sqlite3, binascii, time, base64, json, glob, shutil, math, re, urllib.parse, string
-import requests
-import nacl.signing, nacl.encoding
-import unpaddedbase32
-import onionrexceptions, config, logger
-import onionrevents
-import storagecounter
-from etc import pgpwords, onionrvalues
-from . import localcommand, blockmetadata, basicrequests, validatemetadata
-from . import stringvalidators
-
-config.reload()
-class OnionrUtils:
-    '''
-        Various useful functions for validating things, etc functions, connectivity
-    '''
-    def __init__(self, coreInstance):
-        #self.fingerprintFile = 'data/own-fingerprint.txt' #TODO Remove since probably not needed
-        self._core = coreInstance # onionr core instance
-
-        self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions
-        self.peerProcessing = {} # dict of current peer actions: peer, actionList
-        self.storageCounter = storagecounter.StorageCounter(self._core) # used to keep track of how much data onionr is using on disk
-        return
-
-    def escapeAnsi(self, line):
-        '''
-            Remove ANSI escape codes from a string with regex
-
-            taken or adapted from: https://stackoverflow.com/a/38662876 by user https://stackoverflow.com/users/802365/%c3%89douard-lopez
-            cc-by-sa-3 license https://creativecommons.org/licenses/by-sa/3.0/
-        '''
-        ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
-        return ansi_escape.sub('', line)
-
-    def validateHash(self, data, length=64):
-        '''
-            Validate if a string is a valid hash hex digest (does not compare, just checks length and charset)
-        '''
-        return stringvalidators.validate_hash(self, data, length)
-
-    def getEpoch(self):
-        '''returns epoch'''
-        return math.floor(time.time())
-
-    def doPostRequest(self, url, data={}, port=0, proxyType='tor'):
-        '''
-        Do a POST request through a local tor or i2p instance
-        '''
-        return basicrequests.do_post_request(self, url, data, port, proxyType)
-
-    def doGetRequest(self, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
-        '''
-        Do a get request through a local tor or i2p instance
-        '''
-        return basicrequests.do_get_request(self, url, port, proxyType, ignoreAPI, returnHeaders)
-
-def size(path='.'):
-    '''
-        Returns the size of a folder's contents in bytes
-    '''
-    total = 0
-    if os.path.exists(path):
-        if os.path.isfile(path):
-            total = os.path.getsize(path)
-        else:
-            for entry in os.scandir(path):
-                if entry.is_file():
-                    total += entry.stat().st_size
-                elif entry.is_dir():
-                    total += size(entry.path)
-    return total
-
-def humanSize(num, suffix='B'):
-    '''
-        Converts from bytes to a human readable format.
-    '''
-    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
-        if abs(num) < 1024.0:
-            return "%.1f %s%s" % (num, unit, suffix)
-        num /= 1024.0
-    return "%.1f %s%s" % (num, 'Yi', suffix)
-
-def has_block(core_inst, hash):
-    '''
-        Check for new block in the list
-    '''
-    conn = sqlite3.connect(core_inst.blockDB)
-    c = conn.cursor()
-    if not stringvalidators.validate_hash(hash):
-        raise Exception("Invalid hash")
-    for result in c.execute("SELECT COUNT() FROM hashes WHERE hash = ?", (hash,)):
-        if result[0] >= 1:
-            conn.commit()
-            conn.close()
-            return True
-        else:
-            conn.commit()
-            conn.close()
-            return False
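For reference, the wrapper methods deleted above map onto the module-level helpers used throughout the rest of this commit. This is only a summary of the substitutions visible in the surrounding hunks, not an exhaustive list:

    # old OnionrUtils method        -> replacement in this commit
    # utils.getEpoch()              -> epoch.get_epoch()
    # utils.validateHash(h)         -> stringvalidators.validate_hash(h)
    # utils.escapeAnsi(s)           -> escapeansi.escape_ANSI(s)
    # utils.bytesToStr(b)           -> bytesconverter.bytes_to_str(b)
    # utils.doGetRequest(url, ...)  -> basicrequests.do_get_request(core_inst, url, ...)
    # utils.doPostRequest(url, ...) -> basicrequests.do_post_request(core_inst, url, ...)
    # utils.storageCounter          -> core_inst.storage_counter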
@@ -1,12 +1,12 @@
 import requests
 import logger, onionrexceptions
-def do_post_request(utils_inst, url, data={}, port=0, proxyType='tor'):
+def do_post_request(core_inst, url, data={}, port=0, proxyType='tor'):
     '''
     Do a POST request through a local tor or i2p instance
     '''
     if proxyType == 'tor':
         if port == 0:
-            port = utils_inst._core.torPort
+            port = core_inst.torPort
         proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)}
     elif proxyType == 'i2p':
         proxies = {'http': 'http://127.0.0.1:4444'}
@@ -24,11 +24,11 @@ def do_post_request(utils_inst, url, data={}, port=0, proxyType='tor'):
         retData = False
     return retData

-def do_get_request(utils_inst, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
+def do_get_request(core_inst, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
     '''
     Do a get request through a local tor or i2p instance
     '''
-    API_VERSION = utils_inst._core.onionrInst.API_VERSION
+    API_VERSION = core_inst.onionrInst.API_VERSION
     retData = False
     if proxyType == 'tor':
         if port == 0:
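With this change callers hand the Core instance itself to the request helpers instead of a utils object, as the call sites elsewhere in this commit do. A hedged usage sketch; my_core and the socks port value are stand-ins, not names from this diff:

    from onionrutils import basicrequests
    # my_core: an existing core.Core instance; 9050 is only an example Tor socks port
    if basicrequests.do_get_request(my_core, 'http://example.onion/ping', port=9050, ignoreAPI=True) == 'pong!':
        print('peer is reachable')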
@@ -1,9 +1,9 @@
-import json
+import json, sqlite3
 import logger, onionrevents
 from onionrusers import onionrusers
 from etc import onionrvalues
 import onionrblockapi
-from . import epoch
+from . import epoch, stringvalidators, bytesconverter
 def get_block_metadata_from_data(blockData):
     '''
         accepts block contents as string, returns a tuple of
@@ -33,24 +33,24 @@ def get_block_metadata_from_data(blockData):
     meta = metadata['meta']
     return (metadata, meta, data)

-def process_block_metadata(utils_inst, blockHash):
+def process_block_metadata(core_inst, blockHash):
     '''
     Read metadata from a block and cache it to the block database
     '''
     curTime = epoch.get_rounded_epoch(roundS=60)
-    myBlock = onionrblockapi.Block(blockHash, utils_inst._core)
+    myBlock = onionrblockapi.Block(blockHash, core_inst)
     if myBlock.isEncrypted:
         myBlock.decrypt()
     if (myBlock.isEncrypted and myBlock.decrypted) or (not myBlock.isEncrypted):
         blockType = myBlock.getMetadata('type') # we would use myBlock.getType() here, but it is bugged with encrypted blocks
-        signer = utils_inst.bytesToStr(myBlock.signer)
+        signer = bytesconverter.bytes_to_str(myBlock.signer)
         valid = myBlock.verifySig()
         if myBlock.getMetadata('newFSKey') is not None:
-            onionrusers.OnionrUser(utils_inst._core, signer).addForwardKey(myBlock.getMetadata('newFSKey'))
+            onionrusers.OnionrUser(core_inst, signer).addForwardKey(myBlock.getMetadata('newFSKey'))

         try:
             if len(blockType) <= 10:
-                utils_inst._core.updateBlockInfo(blockHash, 'dataType', blockType)
+                core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
         except TypeError:
             logger.warn("Missing block information")
             pass
@@ -61,9 +61,28 @@ def process_block_metadata(utils_inst, blockHash):
         except (AssertionError, ValueError, TypeError) as e:
             expireTime = onionrvalues.OnionrValues().default_expire + curTime
         finally:
-            utils_inst._core.updateBlockInfo(blockHash, 'expire', expireTime)
+            core_inst.updateBlockInfo(blockHash, 'expire', expireTime)
         if not blockType is None:
-            utils_inst._core.updateBlockInfo(blockHash, 'dataType', blockType)
-        onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = utils_inst._core.onionrInst)
+            core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
+        onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = core_inst.onionrInst)
     else:
         pass
+
+def has_block(core_inst, hash):
+    '''
+        Check for new block in the list
+    '''
+    conn = sqlite3.connect(core_inst.blockDB)
+    c = conn.cursor()
+    if not stringvalidators.validate_hash(hash):
+        raise Exception("Invalid hash")
+    for result in c.execute("SELECT COUNT() FROM hashes WHERE hash = ?", (hash,)):
+        if result[0] >= 1:
+            conn.commit()
+            conn.close()
+            return True
+        else:
+            conn.commit()
+            conn.close()
+            return False
+    return False
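has_block, previously a helper on the deleted utils module, now lives in blockmetadata and takes the Core instance directly. A small usage sketch; core_inst is a stand-in for an existing core.Core and the hash is a dummy value:

    from onionrutils import blockmetadata
    # 'ab' * 32 is just a placeholder 64-character hex digest
    if blockmetadata.has_block(core_inst, 'ab' * 32):
        print('block already stored locally')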
@@ -6,6 +6,6 @@ def get_rounded_epoch(roundS=60):
    epoch = get_epoch()
    return epoch - (epoch % roundS)

-def get_epoch(self):
+def get_epoch():
    '''returns epoch'''
    return math.floor(time.time())
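get_epoch drops the stray self parameter, so both helpers are now plain module functions:

    from onionrutils import epoch
    now = epoch.get_epoch()                       # current Unix time, floored to a whole second
    minute = epoch.get_rounded_epoch(roundS=60)   # same value rounded down to the minute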
onionr/onionrutils/escapeansi.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+import re
+def escape_ANSI(line):
+    '''
+        Remove ANSI escape codes from a string with regex
+
+        taken or adapted from: https://stackoverflow.com/a/38662876 by user https://stackoverflow.com/users/802365/%c3%89douard-lopez
+        cc-by-sa-3 license https://creativecommons.org/licenses/by-sa/3.0/
+    '''
+    ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
+    return ansi_escape.sub('', line)
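This is a direct replacement for the removed escapeAnsi wrapper, e.g.:

    from onionrutils import escapeansi
    print(escapeansi.escape_ANSI('\x1b[31mhello\x1b[0m'))  # the color codes are stripped, printing: hello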
@@ -1,6 +1,7 @@
-import base64, string, onionrutils
+import base64, string
 import unpaddedbase32, nacl.signing, nacl.encoding
-def validate_hash(utils_inst, data, length=64):
+from onionrutils import bytesconverter
+def validate_hash(data, length=64):
     '''
     Validate if a string is a valid hash hex digest (does not compare, just checks length and charset)
     '''
@@ -25,7 +26,7 @@ def validate_pub_key(key):
     if type(key) is type(None):
         return False
     # Accept keys that have no = padding
-    key = unpaddedbase32.repad(onionrutils.str_to_bytes(key))
+    key = unpaddedbase32.repad(bytesconverter.str_to_bytes(key))

     retVal = False
     try:
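validate_hash loses the old utils_inst parameter, so the check is a plain function call now. Per its docstring it only checks length and charset, not whether the block exists, so under that assumption:

    from onionrutils import stringvalidators
    stringvalidators.validate_hash('ab' * 32)      # True: 64 hex characters
    stringvalidators.validate_hash('not-a-hash')   # False: wrong length and charset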
@@ -1,7 +1,7 @@
 import json
 import logger, onionrexceptions
 from etc import onionrvalues
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch
 def validate_metadata(core_inst, metadata, blockData):
     '''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
     # TODO, make this check sane sizes
@@ -37,18 +37,18 @@ def validate_metadata(core_inst, metadata, blockData):
                 if not stringvalidators.is_integer_string(metadata[i]):
                     logger.warn('Block metadata time stamp is not integer string or int')
                     break
-                isFuture = (metadata[i] - core_inst.getEpoch())
+                isFuture = (metadata[i] - epoch.get_epoch())
                 if isFuture > maxClockDifference:
                     logger.warn('Block timestamp is skewed to the future over the max %s: %s' (maxClockDifference, isFuture))
                     break
-                if (core_inst.getEpoch() - metadata[i]) > maxAge:
+                if (epoch.get_epoch() - metadata[i]) > maxAge:
                     logger.warn('Block is outdated: %s' % (metadata[i],))
                     break
             elif i == 'expire':
                 try:
-                    assert int(metadata[i]) > core_inst.getEpoch()
+                    assert int(metadata[i]) > epoch.get_epoch()
                 except AssertionError:
-                    logger.warn('Block is expired: %s less than %s' % (metadata[i], core_inst.getEpoch()))
+                    logger.warn('Block is expired: %s less than %s' % (metadata[i], epoch.get_epoch()))
                     break
             elif i == 'encryptType':
                 try:
@@ -23,7 +23,7 @@ import locale, sys, os, threading, json
 locale.setlocale(locale.LC_ALL, '')
 import onionrservices, logger
 from onionrservices import bootstrapservice
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch, basicrequests

 plugin_name = 'esoteric'
 PLUGIN_VERSION = '0.0.0'
@@ -58,8 +58,8 @@ class Esoteric:
                 else:
                     message += '\n'
             except EOFError:
-                message = json.dumps({'m': message, 't': self.myCore._utils.getEpoch()})
-                print(self.myCore._utils.doPostRequest('http://%s/esoteric/sendto' % (self.transport,), port=self.socks, data=message))
+                message = json.dumps({'m': message, 't': epoch.get_epoch()})
+                print(basicrequests.do_post_request(self.myCore, 'http://%s/esoteric/sendto' % (self.transport,), port=self.socks, data=message))
                 message = ''
             except KeyboardInterrupt:
                 self.shutdown = True
@@ -78,7 +78,7 @@ class Esoteric:
         self.socks = self.myCore.config.get('tor.socksport')

         print('connected with', peer, 'on', peer_transport_address)
-        if self.myCore._utils.doGetRequest('http://%s/ping' % (peer_transport_address,), ignoreAPI=True, port=self.socks) == 'pong!':
+        if basicrequests.do_get_request(self.myCore, 'http://%s/ping' % (peer_transport_address,), ignoreAPI=True, port=self.socks) == 'pong!':
             print('connected', peer_transport_address)
             threading.Thread(target=self._sender_loop).start()

@@ -22,6 +22,7 @@
 import threading, time, locale, sys, os
 from onionrblockapi import Block
 import logger, config
+from onionrutils import escapeansi, epoch
 locale.setlocale(locale.LC_ALL, '')

 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
@@ -43,7 +44,7 @@ class OnionrFlow:
         logger.warn("Please note: everything said here is public, even if a random channel name is used.", terminal=True)
         message = ""
         self.flowRunning = True
-        newThread = threading.Thread(target=self.showOutput)
+        newThread = threading.Thread(target=self.showOutput, daemon=True)
         newThread.start()
         try:
             self.channel = logger.readline("Enter a channel name or none for default:")
@@ -59,7 +60,7 @@ class OnionrFlow:
             else:
                 if message == "q":
                     self.flowRunning = False
-                expireTime = self.myCore._utils.getEpoch() + 43200
+                expireTime = epoch.get_epoch() + 43200
                 if len(message) > 0:
                     logger.info('Inserting message as block...', terminal=True)
                     self.myCore.insertBlock(message, header='txt', expire=expireTime, meta={'ch': self.channel})
@@ -83,7 +84,7 @@ class OnionrFlow:
                 logger.info('\n------------------------', prompt = False, terminal=True)
                 content = block.getContent()
                 # Escape new lines, remove trailing whitespace, and escape ansi sequences
-                content = self.myCore._utils.escapeAnsi(content.replace('\n', '\\n').replace('\r', '\\r').strip())
+                content = escapeansi.escape_ANSI(content.replace('\n', '\\n').replace('\r', '\\r').strip())
                 logger.info(block.getDate().strftime("%m/%d %H:%M") + ' - ' + logger.colors.reset + content, prompt = False, terminal=True)
                 self.alreadyOutputed.append(block.getHash())
             time.sleep(5)
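Marking the output thread as a daemon means it no longer keeps the interpreter alive after the main flow loop exits. A minimal illustration of that behavior, independent of Onionr itself:

    import threading, time

    def poll():
        while True:
            time.sleep(5)   # stand-in for checking for new channel messages

    # daemon=True: when the main thread returns, this loop is killed instead of hanging the process
    threading.Thread(target=poll, daemon=True).start()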
@@ -22,7 +22,7 @@
 import logger, config
 import os, sys, json, time, random, shutil, base64, getpass, datetime, re
 from onionrblockapi import Block
-from onionrutils import importnewblocks, stringvalidators,
+from onionrutils import importnewblocks, stringvalidators

 plugin_name = 'pluginmanager'

@@ -397,7 +397,7 @@ def commandInstallPlugin():

         return True

-    valid_hash = pluginapi.get_utils().validateHash(pkobh)
+    valid_hash = stringvalidators.validate_hash(pkobh)
     real_block = False
     valid_key = stringvalidators.validate_pub_key(pkobh)
     real_key = False
@@ -485,7 +485,7 @@ def commandAddRepository():

         blockhash = sys.argv[2]

-        if pluginapi.get_utils().validateHash(blockhash):
+        if stringvalidators.validate_hash(blockhash):
             if Block.exists(blockhash):
                 try:
                     blockContent = json.loads(Block(blockhash, core = pluginapi.get_core()).getContent())
@@ -521,7 +521,7 @@ def commandRemoveRepository():

         blockhash = sys.argv[2]

-        if pluginapi.get_utils().validateHash(blockhash):
+        if stringvalidators.validate_hash(blockhash):
             if blockhash in getRepositories():
                 try:
                     removeRepository(blockhash)
@@ -21,6 +21,7 @@ import sys, os, json
 from flask import Response, request, redirect, Blueprint, abort
 import core
 from onionrusers import contactmanager
+from onionrutils import stringvalidators
 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
 import loadinbox, sentboxdb

@@ -34,7 +35,7 @@ def mail_ping():

 @flask_blueprint.route('/mail/deletemsg/<block>', methods=['POST'])
 def mail_delete(block):
-    if not c._utils.validateHash(block):
+    if not stringvalidators.validate_hash(block):
         abort(504)
     existing = kv.get('deleted_mail')
     if existing is None:
@@ -23,7 +23,7 @@ import logger, config, threading, time, datetime
 from onionrblockapi import Block
 import onionrexceptions
 from onionrusers import onionrusers
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, escapeansi
 import locale, sys, os, json

 locale.setlocale(locale.LC_ALL, '')
@@ -148,7 +148,7 @@ class OnionrMail:
             print('')
             if cancel != '-q':
                 try:
-                    print(draw_border(self.myCore._utils.escapeAnsi(readBlock.bcontent.decode().strip())))
+                    print(draw_border(escapeansi.escape_ANSI(readBlock.bcontent.decode().strip())))
                 except ValueError:
                     logger.warn('Error presenting message. This is usually due to a malformed or blank message.', terminal=True)
                     pass
@@ -187,7 +187,7 @@ class OnionrMail:
                 else:
                     logger.info('Sent to: ' + self.sentMessages[self.sentboxList[int(choice)]][1], terminal=True)
                     # Print ansi escaped sent message
-                    logger.info(self.myCore._utils.escapeAnsi(self.sentMessages[self.sentboxList[int(choice)]][0]), terminal=True)
+                    logger.info(escapeansi.escape_ANSI(self.sentMessages[self.sentboxList[int(choice)]][0]), terminal=True)
                     input('Press enter to continue...')
             finally:
                 if choice == '-q':
@@ -19,6 +19,7 @@
 '''
 import sqlite3, os
 import core
+from onionrutils import epoch
 class SentBox:
     def __init__(self, mycore):
         assert isinstance(mycore, core.Core)
@@ -60,7 +61,7 @@ class SentBox:

     def addToSent(self, blockID, peer, message, subject=''):
         self.connect()
-        args = (blockID, peer, message, subject, self.core._utils.getEpoch())
+        args = (blockID, peer, message, subject, epoch.get_epoch())
         self.cursor.execute('INSERT INTO sent VALUES(?, ?, ?, ?, ?)', args)
         self.conn.commit()
         self.close()
@@ -17,7 +17,8 @@
     You should have received a copy of the GNU General Public License
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
-def checkNetwork(utilsInst, torPort=0):
+from onionrutils import basicrequests
+def checkNetwork(core_inst, torPort=0):
     '''Check if we are connected to the internet (through Tor)'''
     retData = False
     connectURLs = []
@@ -26,7 +27,7 @@ def checkNetwork(utilsInst, torPort=0):
             connectURLs = connectTest.read().split(',')

         for url in connectURLs:
-            if utilsInst.doGetRequest(url, port=torPort, ignoreAPI=True) != False:
+            if basicrequests.do_get_request(core_inst, url, port=torPort, ignoreAPI=True) != False:
                 retData = True
                 break
     except FileNotFoundError:
onionr/utils/sizeutils.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+import sqlite3, os
+from onionrutils import stringvalidators
+def human_size(num, suffix='B'):
+    '''
+        Converts from bytes to a human readable format.
+    '''
+    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
+        if abs(num) < 1024.0:
+            return "%.1f %s%s" % (num, unit, suffix)
+        num /= 1024.0
+    return "%.1f %s%s" % (num, 'Yi', suffix)
+
+def size(path='.'):
+    '''
+        Returns the size of a folder's contents in bytes
+    '''
+    total = 0
+    if os.path.exists(path):
+        if os.path.isfile(path):
+            total = os.path.getsize(path)
+        else:
+            for entry in os.scandir(path):
+                if entry.is_file():
+                    total += entry.stat().st_size
+                elif entry.is_dir():
+                    total += size(entry.path)
+    return total
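These helpers replace the size/humanSize functions from the deleted utils module. A usage sketch, assuming the onionr/ directory is on the import path and 'data/' is a placeholder for an existing directory:

    from utils import sizeutils
    # reports the on-disk footprint of the data directory, e.g. '4.2 MB'
    print(sizeutils.human_size(sizeutils.size('data/')))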