Onionr/onionr/core.py

871 lines
30 KiB
Python
Raw Normal View History

'''
Onionr - P2P Anonymous Storage Network
2018-02-01 22:45:15 +00:00
Core Onionr library, useful for external programs. Handles peer & data processing
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3, os, sys, time, math, base64, tarfile, nacl, logger, json, netcontroller, math, config, uuid
2018-05-19 22:11:51 +00:00
from onionrblockapi import Block
2018-01-07 08:55:44 +00:00
import onionrutils, onionrcrypto, onionrproofs, onionrevents as events, onionrexceptions, onionrvalues
import onionrblacklist, onionrusers
import dbcreator, onionrstorage, serializeddata
# On Python < 3.6 the hashlib module has no sha3 support, so the third-party
# sha3 backport is required.
if sys.version_info < (3, 6):
    try:
        import sha3
    except ImportError:
        # Bug fix: this previously caught ModuleNotFoundError, a class that
        # does not exist before Python 3.6 — so on the very interpreters this
        # branch targets, a missing sha3 raised NameError instead of printing
        # the message. ImportError is its parent and exists everywhere.
        logger.fatal('On Python 3 versions prior to 3.6.x, you need the sha3 module')
        sys.exit(1)
class Core:
    def __init__(self, torPort=0):
        '''
        Initialize Core Onionr library

        torPort: SOCKS port of the local Tor client, if known (0 = unset)

        Builds all database/file paths under the data directory, creates
        missing directories and databases, loads the bootstrap node list,
        and constructs the helper objects (utils, crypto, blacklist,
        serializer). Exits the process with status 1 on any failure.
        '''
        # Data directory comes from the ONIONR_HOME environment variable,
        # normalized to end with a slash; defaults to ./data/
        try:
            self.dataDir = os.environ['ONIONR_HOME']
            if not self.dataDir.endswith('/'):
                self.dataDir += '/'
        except KeyError:
            self.dataDir = 'data/'
        try:
            # Paths of the sqlite databases and support files
            self.queueDB = self.dataDir + 'queue.db'
            self.peerDB = self.dataDir + 'peers.db'
            self.blockDB = self.dataDir + 'blocks.db'
            self.blockDataLocation = self.dataDir + 'blocks/'
            self.blockDataDB = self.blockDataLocation + 'block-data.db'
            self.publicApiHostFile = self.dataDir + 'public-host.txt'
            self.privateApiHostFile = self.dataDir + 'private-host.txt'
            self.addressDB = self.dataDir + 'address.db'
            self.hsAddress = ''  # our own Tor hidden service address, read from disk below if present
            self.i2pAddress = config.get('i2p.ownAddr', None)
            self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
            self.bootstrapList = []
            self.requirements = onionrvalues.OnionrValues()
            self.torPort = torPort
            self.dataNonceFile = self.dataDir + 'block-nonces.dat'
            self.dbCreate = dbcreator.DBCreator(self)
            self.forwardKeysFile = self.dataDir + 'forward-keys.db'

            # Socket data, defined here because of multithreading constraints with gevent
            self.killSockets = False
            self.startSocket = {}
            self.socketServerConnData = {}
            self.socketReasons = {}
            self.socketServerResponseData = {}

            self.usageFile = self.dataDir + 'disk-usage.txt'
            self.config = config

            self.maxBlockSize = 10000000 # max block size in bytes

            # First-run setup: data directories and databases
            if not os.path.exists(self.dataDir):
                os.mkdir(self.dataDir)
            if not os.path.exists(self.dataDir + 'blocks/'):
                os.mkdir(self.dataDir + 'blocks/')
            if not os.path.exists(self.blockDB):
                self.createBlockDB()
            if not os.path.exists(self.forwardKeysFile):
                self.dbCreate.createForwardKeyDB()

            # Learn our hidden service address if Tor has already generated one
            if os.path.exists(self.dataDir + '/hs/hostname'):
                with open(self.dataDir + '/hs/hostname', 'r') as hs:
                    self.hsAddress = hs.read().strip()

            # Load bootstrap address list
            if os.path.exists(self.bootstrapFileLocation):
                with open(self.bootstrapFileLocation, 'r') as bootstrap:
                    bootstrap = bootstrap.read()
                    for i in bootstrap.split('\n'):
                        self.bootstrapList.append(i)
            else:
                logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)

            self._utils = onionrutils.OnionrUtils(self)
            # Initialize the crypto object
            self._crypto = onionrcrypto.OnionrCrypto(self)
            self._blacklist = onionrblacklist.OnionrBlackList(self)
            self.serializer = serializeddata.SerializedData(self)

        except Exception as error:
            # Initialization is all-or-nothing; nothing useful can run without it
            logger.error('Failed to initialize core Onionr library.', error=error)
            logger.fatal('Cannot recover from error.')
            sys.exit(1)
        return
def refreshFirstStartVars(self):
'''
Hack to refresh some vars which may not be set on first start
'''
if os.path.exists(self.dataDir + '/hs/hostname'):
with open(self.dataDir + '/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
2018-12-09 17:29:39 +00:00
def addPeer(self, peerID, name=''):
'''
2018-03-16 15:35:37 +00:00
Adds a public key to the key database (misleading function name)
'''
2018-01-10 03:50:38 +00:00
# This function simply adds a peer to the DB
2018-02-21 09:32:31 +00:00
if not self._utils.validatePubKey(peerID):
2018-01-26 09:46:21 +00:00
return False
2018-09-09 05:12:41 +00:00
events.event('pubkey_add', data = {'key': peerID}, onionr = None)
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.peerDB, timeout=10)
2018-04-26 07:40:39 +00:00
hashID = self._crypto.pubKeyHashID(peerID)
c = conn.cursor()
2018-12-09 17:29:39 +00:00
t = (peerID, name, 'unknown', hashID, 0)
2018-04-04 01:54:49 +00:00
for i in c.execute("SELECT * FROM peers WHERE id = ?;", (peerID,)):
2018-04-04 01:54:49 +00:00
try:
if i[0] == peerID:
conn.close()
return False
except ValueError:
pass
except IndexError:
pass
2018-12-09 17:29:39 +00:00
c.execute('INSERT INTO peers (id, name, dateSeen, hashID, trust) VALUES(?, ?, ?, ?, ?);', t)
conn.commit()
conn.close()
2018-04-21 03:10:50 +00:00
return True
2018-02-27 21:23:49 +00:00
def addAddress(self, address):
2018-04-21 03:10:50 +00:00
'''
Add an address to the address database (only tor currently)
'''
if address == config.get('i2p.ownAddr', None) or address == self.hsAddress:
return False
if type(address) is None or len(address) == 0:
2018-12-09 17:29:39 +00:00
return False
2018-02-27 21:23:49 +00:00
if self._utils.validateID(address):
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.addressDB, timeout=10)
2018-02-27 21:23:49 +00:00
c = conn.cursor()
# check if address is in database
# this is safe to do because the address is validated above, but we strip some chars here too just in case
address = address.replace('\'', '').replace(';', '').replace('"', '').replace('\\', '')
for i in c.execute("SELECT * FROM adders WHERE address = ?;", (address,)):
try:
if i[0] == address:
conn.close()
return False
except ValueError:
pass
except IndexError:
pass
2018-02-27 21:23:49 +00:00
t = (address, 1)
c.execute('INSERT INTO adders (address, type) VALUES(?, ?);', t)
conn.commit()
conn.close()
2018-04-21 03:10:50 +00:00
events.event('address_add', data = {'address': address}, onionr = None)
2018-02-27 21:23:49 +00:00
return True
else:
#logger.debug('Invalid ID: %s' % address)
2018-02-27 21:23:49 +00:00
return False
2018-02-28 00:00:37 +00:00
2018-02-27 21:23:49 +00:00
def removeAddress(self, address):
2018-04-21 03:10:50 +00:00
'''
Remove an address from the address database
'''
2018-02-27 21:23:49 +00:00
if self._utils.validateID(address):
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.addressDB, timeout=10)
2018-02-27 21:23:49 +00:00
c = conn.cursor()
t = (address,)
c.execute('Delete from adders where address=?;', t)
conn.commit()
conn.close()
2018-04-21 03:10:50 +00:00
events.event('address_remove', data = {'address': address}, onionr = None)
2018-02-27 21:23:49 +00:00
return True
else:
2018-04-19 02:16:10 +00:00
return False
2018-05-02 06:01:20 +00:00
    def removeBlock(self, block):
        '''
        remove a block from this node (does not automatically blacklist)

        **You may want blacklist.addToDB(blockHash)

        Deletes the hash row from the block db, credits the freed size back
        to the storage counter, and deletes the on-disk block file.
        Silently does nothing for an invalid hash.
        '''
        if self._utils.validateHash(block):
            conn = sqlite3.connect(self.blockDB, timeout=10)
            c = conn.cursor()
            t = (block,)
            c.execute('Delete from hashes where hash=?;', t)
            conn.commit()
            conn.close()
            blockFile = self.dataDir + '/blocks/%s.dat' % block
            dataSize = 0
            try:
                ''' Get size of data when loaded as an object/var, rather than on disk,
                    to avoid conflict with getsizeof when saving blocks
                '''
                # NOTE(review): the file is opened in text mode ('r'); blocks
                # containing non-UTF-8 bytes would raise UnicodeDecodeError
                # here — presumably block files are text-safe, confirm against
                # onionrstorage before changing.
                with open(blockFile, 'r') as data:
                    dataSize = sys.getsizeof(data.read())
                self._utils.storageCounter.removeBytes(dataSize)
                os.remove(blockFile)
            except FileNotFoundError:
                # Block was never saved locally; nothing to free
                pass
def createAddressDB(self):
'''
Generate the address database
'''
self.dbCreate.createAddressDB()
2018-01-10 03:50:38 +00:00
def createPeerDB(self):
'''
Generate the peer sqlite3 database and populate it with the peers table.
'''
self.dbCreate.createPeerDB()
def createBlockDB(self):
'''
Create a database for blocks
'''
self.dbCreate.createBlockDB()
2018-05-10 07:42:24 +00:00
def addToBlockDB(self, newHash, selfInsert=False, dataSaved=False):
'''
Add a hash value to the block db
Should be in hex format!
'''
if not os.path.exists(self.blockDB):
raise Exception('Block db does not exist')
if self._utils.hasBlock(newHash):
return
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
currentTime = self._utils.getEpoch() + self._crypto.secrets.randbelow(301)
2018-05-10 07:42:24 +00:00
if selfInsert or dataSaved:
selfInsert = 1
else:
selfInsert = 0
data = (newHash, currentTime, '', selfInsert)
c.execute('INSERT INTO hashes (hash, dateReceived, dataType, dataSaved) VALUES(?, ?, ?, ?);', data)
conn.commit()
conn.close()
return
def getData(self, hash):
'''
Simply return the data associated to a hash
'''
2019-01-05 22:16:36 +00:00
'''
2018-01-27 21:49:48 +00:00
try:
# logger.debug('Opening %s' % (str(self.blockDataLocation) + str(hash) + '.dat'))
2018-04-23 01:43:17 +00:00
dataFile = open(self.blockDataLocation + hash + '.dat', 'rb')
2018-01-27 21:49:48 +00:00
data = dataFile.read()
dataFile.close()
except FileNotFoundError:
data = False
2019-01-05 22:16:36 +00:00
'''
data = onionrstorage.getData(self, hash)
return data
def setData(self, data):
'''
Set the data assciated with a hash
'''
data = data
dataSize = sys.getsizeof(data)
2018-04-23 02:24:34 +00:00
if not type(data) is bytes:
data = data.encode()
2018-09-24 23:48:00 +00:00
dataHash = self._crypto.sha3Hash(data)
2018-08-13 03:48:33 +00:00
2018-01-29 02:14:46 +00:00
if type(dataHash) is bytes:
dataHash = dataHash.decode()
2018-01-25 22:39:09 +00:00
blockFileName = self.blockDataLocation + dataHash + '.dat'
if os.path.exists(blockFileName):
pass # TODO: properly check if block is already saved elsewhere
#raise Exception("Data is already set for " + dataHash)
2018-01-25 22:39:09 +00:00
else:
if self._utils.storageCounter.addBytes(dataSize) != False:
2019-01-05 22:16:36 +00:00
#blockFile = open(blockFileName, 'wb')
#blockFile.write(data)
#blockFile.close()
onionrstorage.store(self, data, blockHash=dataHash)
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
2018-09-27 00:50:54 +00:00
c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = ?;", (dataHash,))
conn.commit()
conn.close()
with open(self.dataNonceFile, 'a') as nonceFile:
nonceFile.write(dataHash + '\n')
else:
raise onionrexceptions.DiskAllocationReached
2018-01-25 22:39:09 +00:00
return dataHash
def daemonQueue(self):
'''
Gives commands to the communication proccess/daemon by reading an sqlite3 database
This function intended to be used by the client. Queue to exchange data between "client" and server.
'''
retData = False
if not os.path.exists(self.queueDB):
self.dbCreate.createDaemonDB()
else:
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.queueDB, timeout=10)
2018-01-04 07:12:46 +00:00
c = conn.cursor()
try:
for row in c.execute('SELECT command, data, date, min(ID), responseID FROM commands group by id'):
retData = row
break
except sqlite3.OperationalError:
self.dbCreate.createDaemonDB()
else:
if retData != False:
c.execute('DELETE FROM commands WHERE id=?;', (retData[3],))
2018-07-01 03:38:57 +00:00
conn.commit()
conn.close()
2018-04-21 03:10:50 +00:00
events.event('queue_pop', data = {'data': retData}, onionr = None)
return retData
2018-07-04 19:07:17 +00:00
def daemonQueueAdd(self, command, data='', responseID=''):
'''
Add a command to the daemon queue, used by the communication daemon (communicator.py)
'''
2018-09-02 02:59:03 +00:00
retData = True
date = self._utils.getEpoch()
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.queueDB, timeout=10)
c = conn.cursor()
t = (command, data, date, responseID)
2018-09-02 02:59:03 +00:00
try:
c.execute('INSERT INTO commands (command, data, date, responseID) VALUES(?, ?, ?, ?)', t)
2018-09-02 02:59:03 +00:00
conn.commit()
except sqlite3.OperationalError:
retData = False
2018-09-02 04:55:24 +00:00
self.daemonQueue()
2018-04-21 03:10:50 +00:00
events.event('queue_push', data = {'command': command, 'data': data}, onionr = None)
conn.close()
2018-09-02 02:59:03 +00:00
return retData
2019-01-18 01:14:26 +00:00
def daemonQueueGetResponse(self, responseID=''):
'''
Get a response sent by communicator to the API, by requesting to the API
'''
assert len(responseID) > 0
resp = self._utils.localCommand('queueResponse/' + responseID)
return resp
2019-01-18 01:14:26 +00:00
def daemonQueueWaitForResponse(self, responseID='', checkFreqSecs=1):
resp = 'failure'
while resp == 'failure':
resp = self.daemonQueueGetResponse(responseID)
time.sleep(1)
return resp
2019-01-18 01:14:26 +00:00
def daemonQueueSimple(self, command, data='', checkFreqSecs=1):
'''
A simplified way to use the daemon queue. Will register a command (with optional data) and wait, return the data
Not always useful, but saves time + LOC in some cases.
This is a blocking function, so be careful.
'''
responseID = str(uuid.uuid4()) # generate unique response ID
self.daemonQueueAdd(command, data=data, responseID=responseID)
return self.daemonQueueWaitForResponse(responseID, checkFreqSecs)
2018-01-27 01:16:15 +00:00
def clearDaemonQueue(self):
'''
Clear the daemon queue (somewhat dangerous)
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.queueDB, timeout=10)
2018-01-27 01:16:15 +00:00
c = conn.cursor()
try:
2018-04-21 03:10:50 +00:00
c.execute('DELETE FROM commands;')
conn.commit()
except:
pass
2018-01-27 01:16:15 +00:00
conn.close()
2018-04-21 03:10:50 +00:00
events.event('queue_clear', onionr = None)
return
2018-04-19 02:16:10 +00:00
2019-01-11 22:59:21 +00:00
def listAdders(self, randomOrder=True, i2p=True, recent=0):
'''
Return a list of addresses
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.addressDB, timeout=10)
c = conn.cursor()
if randomOrder:
addresses = c.execute('SELECT * FROM adders ORDER BY RANDOM();')
else:
addresses = c.execute('SELECT * FROM adders;')
addressList = []
for i in addresses:
2019-01-11 22:59:21 +00:00
if len(i[0].strip()) == 0:
continue
2018-02-28 00:00:37 +00:00
addressList.append(i[0])
conn.close()
2019-01-11 22:59:21 +00:00
testList = list(addressList) # create new list to iterate
for address in testList:
try:
if recent > 0 and (self._utils.getEpoch() - self.getAddressInfo(address, 'lastConnect')) > recent:
raise TypeError # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
except TypeError:
addressList.remove(address)
return addressList
def listPeers(self, randomOrder=True, getPow=False, trust=0):
'''
2018-03-16 15:35:37 +00:00
Return a list of public keys (misleading function name)
2018-01-28 01:53:24 +00:00
randomOrder determines if the list should be in a random order
trust sets the minimum trust to list
2018-01-26 06:28:11 +00:00
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.peerDB, timeout=10)
2018-01-26 06:28:11 +00:00
c = conn.cursor()
2018-09-27 00:50:54 +00:00
payload = ''
if trust not in (0, 1, 2):
logger.error('Tried to select invalid trust.')
return
2018-09-27 00:50:54 +00:00
2018-01-28 01:53:24 +00:00
if randomOrder:
payload = 'SELECT * FROM peers WHERE trust >= ? ORDER BY RANDOM();'
2018-01-28 01:53:24 +00:00
else:
payload = 'SELECT * FROM peers WHERE trust >= ?;'
2018-09-27 00:50:54 +00:00
2018-01-26 06:28:11 +00:00
peerList = []
2018-09-27 00:50:54 +00:00
for i in c.execute(payload, (trust,)):
try:
2018-04-04 01:54:49 +00:00
if len(i[0]) != 0:
2018-05-07 06:55:03 +00:00
if getPow:
2018-05-15 05:16:00 +00:00
peerList.append(i[0] + '-' + i[1])
2018-05-07 06:55:03 +00:00
else:
peerList.append(i[0])
except TypeError:
pass
2018-09-27 00:50:54 +00:00
2018-05-07 06:55:03 +00:00
if getPow:
2018-05-07 07:46:07 +00:00
try:
peerList.append(self._crypto.pubKey + '-' + self._crypto.pubKeyPowToken)
except TypeError:
pass
2018-05-07 06:55:03 +00:00
else:
peerList.append(self._crypto.pubKey)
2018-09-27 00:50:54 +00:00
2018-01-26 06:28:11 +00:00
conn.close()
2018-09-27 00:50:54 +00:00
2018-01-26 06:28:11 +00:00
return peerList
def getPeerInfo(self, peer, info):
'''
Get info about a peer from their database entry
2018-01-26 06:28:11 +00:00
id text 0
name text, 1
2018-04-04 01:54:49 +00:00
adders text, 2
2018-09-11 19:45:06 +00:00
dateSeen not null, 3
2018-12-09 17:29:39 +00:00
trust int 4
hashID text 5
2018-01-26 06:28:11 +00:00
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.peerDB, timeout=10)
2018-01-26 06:28:11 +00:00
c = conn.cursor()
2018-09-27 00:50:54 +00:00
2018-01-26 06:28:11 +00:00
command = (peer,)
2018-12-09 17:29:39 +00:00
infoNumbers = {'id': 0, 'name': 1, 'adders': 2, 'dateSeen': 3, 'trust': 4, 'hashID': 5}
2018-01-26 06:28:11 +00:00
info = infoNumbers[info]
iterCount = 0
retVal = ''
2018-09-27 00:50:54 +00:00
for row in c.execute('SELECT * FROM peers WHERE id=?;', command):
2018-01-26 06:28:11 +00:00
for i in row:
if iterCount == info:
retVal = i
break
else:
iterCount += 1
2018-09-27 00:50:54 +00:00
2018-01-26 06:28:11 +00:00
conn.close()
2018-01-26 06:28:11 +00:00
return retVal
2018-01-28 01:53:24 +00:00
def setPeerInfo(self, peer, key, data):
'''
Update a peer for a key
'''
2018-09-27 00:50:54 +00:00
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.peerDB, timeout=10)
2018-01-28 01:53:24 +00:00
c = conn.cursor()
2018-09-27 00:50:54 +00:00
2018-01-28 01:56:59 +00:00
command = (data, peer)
2018-09-27 00:50:54 +00:00
2018-01-28 01:53:24 +00:00
# TODO: validate key on whitelist
2018-12-09 17:29:39 +00:00
if key not in ('id', 'name', 'pubkey', 'forwardKey', 'dateSeen', 'trust'):
raise Exception("Got invalid database key when setting peer info")
2018-09-27 00:50:54 +00:00
c.execute('UPDATE peers SET ' + key + ' = ? WHERE id=?', command)
2018-01-28 02:05:55 +00:00
conn.commit()
conn.close()
2018-09-27 00:50:54 +00:00
2018-02-28 00:00:37 +00:00
return
2018-01-26 06:28:11 +00:00
2018-02-28 00:00:37 +00:00
def getAddressInfo(self, address, info):
'''
Get info about an address from its database entry
address text, 0
type int, 1
knownPeer text, 2
speed int, 3
success int, 4
DBHash text, 5
2018-12-13 04:35:01 +00:00
powValue 6
failure int 7
lastConnect 8
trust 9
introduced 10
2018-02-28 00:00:37 +00:00
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.addressDB, timeout=10)
2018-02-28 00:00:37 +00:00
c = conn.cursor()
2018-02-28 00:00:37 +00:00
command = (address,)
2018-12-13 04:35:01 +00:00
infoNumbers = {'address': 0, 'type': 1, 'knownPeer': 2, 'speed': 3, 'success': 4, 'DBHash': 5, 'powValue': 6, 'failure': 7, 'lastConnect': 8, 'trust': 9, 'introduced': 10}
2018-02-28 00:00:37 +00:00
info = infoNumbers[info]
iterCount = 0
retVal = ''
2018-09-27 00:50:54 +00:00
for row in c.execute('SELECT * FROM adders WHERE address=?;', command):
2018-02-28 00:00:37 +00:00
for i in row:
if iterCount == info:
retVal = i
break
else:
iterCount += 1
conn.close()
2018-02-28 00:00:37 +00:00
return retVal
def setAddressInfo(self, address, key, data):
'''
Update an address for a key
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.addressDB, timeout=10)
2018-02-28 00:00:37 +00:00
c = conn.cursor()
2018-02-28 00:00:37 +00:00
command = (data, address)
2019-01-18 01:14:26 +00:00
2018-12-13 04:35:01 +00:00
if key not in ('address', 'type', 'knownPeer', 'speed', 'success', 'DBHash', 'failure', 'powValue', 'lastConnect', 'lastConnectAttempt', 'trust', 'introduced'):
2018-02-28 00:00:37 +00:00
raise Exception("Got invalid database key when setting address info")
2018-07-27 03:07:50 +00:00
else:
c.execute('UPDATE adders SET ' + key + ' = ? WHERE address=?', command)
conn.commit()
conn.close()
return
def getBlockList(self, dateRec = None, unsaved = False):
'''
Get list of our blocks
'''
if dateRec == None:
dateRec = 0
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
2018-01-26 06:28:11 +00:00
c = conn.cursor()
# if unsaved:
# execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();'
# else:
# execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;'
execute = 'SELECT hash FROM hashes WHERE dateReceived >= ? ORDER BY dateReceived ASC;'
args = (dateRec,)
rows = list()
for row in c.execute(execute, args):
2018-01-26 06:28:11 +00:00
for i in row:
rows.append(i)
conn.close()
return rows
2018-05-16 01:47:58 +00:00
def getBlockDate(self, blockHash):
'''
Returns the date a block was received
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
2018-05-16 01:47:58 +00:00
c = conn.cursor()
2018-05-16 01:47:58 +00:00
execute = 'SELECT dateReceived FROM hashes WHERE hash=?;'
args = (blockHash,)
for row in c.execute(execute, args):
for i in row:
return int(i)
conn.close()
2018-05-16 01:47:58 +00:00
return None
def getBlocksByType(self, blockType, orderDate=True):
'''
Returns a list of blocks by the type
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
if orderDate:
execute = 'SELECT hash FROM hashes WHERE dataType=? ORDER BY dateReceived;'
else:
execute = 'SELECT hash FROM hashes WHERE dataType=?;'
args = (blockType,)
rows = list()
for row in c.execute(execute, args):
for i in row:
rows.append(i)
conn.close()
return rows
2018-09-30 04:42:31 +00:00
def getExpiredBlocks(self):
'''Returns a list of expired blocks'''
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
date = int(self._utils.getEpoch())
2018-11-11 02:10:58 +00:00
execute = 'SELECT hash FROM hashes WHERE expire <= %s ORDER BY dateReceived;' % (date,)
2018-09-30 04:42:31 +00:00
rows = list()
2018-11-11 02:10:58 +00:00
for row in c.execute(execute):
2018-09-30 04:42:31 +00:00
for i in row:
rows.append(i)
conn.close()
return rows
def setBlockType(self, hash, blockType):
'''
Sets the type of block
'''
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
2018-09-27 00:50:54 +00:00
c.execute("UPDATE hashes SET dataType = ? WHERE hash = ?;", (blockType, hash))
conn.commit()
conn.close()
2018-09-27 00:50:54 +00:00
return
2018-05-02 06:01:20 +00:00
2018-04-26 07:40:39 +00:00
def updateBlockInfo(self, hash, key, data):
'''
sets info associated with a block
2018-08-04 02:52:45 +00:00
hash - the hash of a block
dateReceived - the date the block was recieved, not necessarily when it was created
decrypted - if we can successfully decrypt the block (does not describe its current state)
dataType - data type of the block
dataFound - if the data has been found for the block
dataSaved - if the data has been saved for the block
sig - optional signature by the author (not optional if author is specified)
author - multi-round partial sha3-256 hash of authors public key
dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
2018-09-30 04:42:31 +00:00
expire - expire date for a block
2018-04-26 07:40:39 +00:00
'''
2018-09-30 04:42:31 +00:00
if key not in ('dateReceived', 'decrypted', 'dataType', 'dataFound', 'dataSaved', 'sig', 'author', 'dateClaimed', 'expire'):
2018-04-26 07:40:39 +00:00
return False
2018-09-28 17:29:07 +00:00
conn = sqlite3.connect(self.blockDB, timeout=10)
2018-04-26 07:40:39 +00:00
c = conn.cursor()
args = (data, hash)
c.execute("UPDATE hashes SET " + key + " = ? where hash = ?;", args)
conn.commit()
conn.close()
2018-09-27 00:50:54 +00:00
2018-04-26 07:40:39 +00:00
return True
def insertBlock(self, data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = {}, expire=None, disableForward=False):
'''
Inserts a block into the network
encryptType must be specified to encrypt a block
'''
2018-12-24 06:12:46 +00:00
allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
if self._utils.storageCounter.isFull():
logger.error(allocationReachedMessage)
return False
retData = False
# check nonce
dataNonce = self._utils.bytesToStr(self._crypto.sha3Hash(data))
try:
with open(self.dataNonceFile, 'r') as nonces:
if dataNonce in nonces:
return retData
except FileNotFoundError:
pass
# record nonce
with open(self.dataNonceFile, 'a') as nonceFile:
nonceFile.write(dataNonce + '\n')
2018-07-04 19:07:17 +00:00
if type(data) is bytes:
data = data.decode()
data = str(data)
retData = ''
signature = ''
signer = ''
metadata = {}
# metadata is full block metadata, meta is internal, user specified metadata
# only use header if not set in provided meta
2018-12-09 17:29:39 +00:00
meta['type'] = str(header)
if encryptType in ('asym', 'sym', ''):
metadata['encryptType'] = encryptType
else:
raise onionrexceptions.InvalidMetadata('encryptType must be asym or sym, or blank')
try:
data = data.encode()
except AttributeError:
pass
2018-11-09 19:07:26 +00:00
if encryptType == 'asym':
if not disableForward and asymPeer != self._crypto.pubKey:
try:
forwardEncrypted = onionrusers.OnionrUser(self, asymPeer).forwardEncrypt(data)
data = forwardEncrypted[0]
meta['forwardEnc'] = True
except onionrexceptions.InvalidPubkey:
pass
#onionrusers.OnionrUser(self, asymPeer).generateForwardKey()
fsKey = onionrusers.OnionrUser(self, asymPeer).generateForwardKey()
#fsKey = onionrusers.OnionrUser(self, asymPeer).getGeneratedForwardKeys().reverse()
meta['newFSKey'] = fsKey
2018-10-08 05:11:46 +00:00
jsonMeta = json.dumps(meta)
if sign:
signature = self._crypto.edSign(jsonMeta.encode() + data, key=self._crypto.privKey, encodeResult=True)
2018-07-17 07:18:17 +00:00
signer = self._crypto.pubKey
if len(jsonMeta) > 1000:
raise onionrexceptions.InvalidMetadata('meta in json encoded form must not exceed 1000 bytes')
2018-07-04 19:07:17 +00:00
2018-10-06 18:06:46 +00:00
user = onionrusers.OnionrUser(self, symKey)
2018-06-20 20:56:28 +00:00
# encrypt block metadata/sig/content
if encryptType == 'sym':
2018-10-06 18:06:46 +00:00
if len(symKey) < self.requirements.passwordLength:
raise onionrexceptions.SecurityError('Weak encryption key')
jsonMeta = self._crypto.symmetricEncrypt(jsonMeta, key=symKey, returnEncoded=True).decode()
data = self._crypto.symmetricEncrypt(data, key=symKey, returnEncoded=True).decode()
signature = self._crypto.symmetricEncrypt(signature, key=symKey, returnEncoded=True).decode()
signer = self._crypto.symmetricEncrypt(signer, key=symKey, returnEncoded=True).decode()
elif encryptType == 'asym':
if self._utils.validatePubKey(asymPeer):
2018-10-07 20:39:22 +00:00
# Encrypt block data with forward secrecy key first, but not meta
jsonMeta = json.dumps(meta)
jsonMeta = self._crypto.pubKeyEncrypt(jsonMeta, asymPeer, encodedData=True, anonymous=True).decode()
data = self._crypto.pubKeyEncrypt(data, asymPeer, encodedData=True, anonymous=True).decode()
signature = self._crypto.pubKeyEncrypt(signature, asymPeer, encodedData=True, anonymous=True).decode()
signer = self._crypto.pubKeyEncrypt(signer, asymPeer, encodedData=True, anonymous=True).decode()
2018-12-09 17:29:39 +00:00
onionrusers.OnionrUser(self, asymPeer, saveUser=True)
else:
raise onionrexceptions.InvalidPubkey(asymPeer + ' is not a valid base32 encoded ed25519 key')
2018-08-04 02:52:45 +00:00
2018-06-20 20:56:28 +00:00
# compile metadata
metadata['meta'] = jsonMeta
metadata['sig'] = signature
metadata['signer'] = signer
metadata['time'] = self._utils.getRoundedEpoch()
2018-09-24 23:48:00 +00:00
2018-09-30 16:53:39 +00:00
# ensure expire is integer and of sane length
if type(expire) is not type(None):
assert len(str(int(expire))) < 14
metadata['expire'] = expire
2018-07-08 07:51:23 +00:00
# send block data (and metadata) to POW module to get tokenized block data
proof = onionrproofs.POW(metadata, data)
payload = proof.waitForResult()
if payload != False:
2018-12-24 06:12:46 +00:00
try:
retData = self.setData(payload)
except onionrexceptions.DiskAllocationReached:
logger.error(allocationReachedMessage)
retData = False
else:
# Tell the api server through localCommand to wait for the daemon to upload this block to make stastical analysis more difficult
self._utils.localCommand('waitforshare/' + retData)
self.addToBlockDB(retData, selfInsert=True, dataSaved=True)
#self.setBlockType(retData, meta['type'])
self._utils.processBlockMetadata(retData)
self.daemonQueueAdd('uploadBlock', retData)
2018-07-11 07:35:22 +00:00
if retData != False:
events.event('insertBlock', onionr = None, threaded = False)
2018-04-19 01:17:47 +00:00
return retData
2018-04-19 02:16:10 +00:00
2018-04-19 01:17:47 +00:00
def introduceNode(self):
'''
Introduces our node into the network by telling X many nodes our HS address
'''
2018-12-09 17:29:39 +00:00
if(self._utils.isCommunicatorRunning(timeout=30)):
announceAmount = 2
nodeList = self.listAdders()
if len(nodeList) == 0:
for i in self.bootstrapList:
if self._utils.validateID(i):
self.addAddress(i)
nodeList.append(i)
if announceAmount > len(nodeList):
announceAmount = len(nodeList)
for i in range(announceAmount):
self.daemonQueueAdd('announceNode', nodeList[i])
events.event('introduction', onionr = None)
return True
else:
logger.error('Onionr daemon is not running.')
return False
2018-04-19 02:16:10 +00:00
return