renamed communicator, fixed bugs, and added work on onionrfragment.py

Kevin Froman 2019-01-28 00:06:20 -06:00
parent 0f4626a68c
commit e60503771e
8 changed files with 109 additions and 120 deletions


@@ -567,6 +567,7 @@ class OnionrCommunicatorDaemon:
# when inserting a block, we try to upload it to a few peers to add some deniability
triedPeers = []
finishedUploads = []
+ self.blocksToUpload = self._core._crypto.randomShuffle(self.blocksToUpload)
if len(self.blocksToUpload) != 0:
for bl in self.blocksToUpload:
if not self._core._utils.validateHash(bl):
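
The added shuffle randomizes the order in which queued blocks are uploaded, so a receiving peer cannot tie queue position to authorship. A minimal sketch of the pattern, with stdlib random standing in for Onionr's CSPRNG shuffle and the helper names purely illustrative:

import random

def drain_upload_queue(queue, upload, validate):
    # randomize upload order so peers cannot infer which block we authored
    random.shuffle(queue)
    finished = []
    for block_hash in queue:
        if validate(block_hash):
            upload(block_hash)
        finished.append(block_hash)
    # remove processed entries outside the iteration
    for block_hash in finished:
        queue.remove(block_hash)
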


@@ -99,6 +99,7 @@ class Core:
logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)
self._utils = onionrutils.OnionrUtils(self)
+ self.blockCache = onionrstorage.BlockCache()
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)


@@ -33,7 +33,7 @@ import onionrutils
import netcontroller
from netcontroller import NetController
from onionrblockapi import Block
- import onionrproofs, onionrexceptions, onionrusers, communicator2
+ import onionrproofs, onionrexceptions, onionrusers, communicator
try:
from urllib3.contrib.socks import SOCKSProxyManager
@@ -710,7 +710,6 @@ class Onionr:
'''
Starts the Onionr communication daemon
'''
- communicatorDaemon = './communicator2.py'
# remove runcheck if it exists
if os.path.isfile('data/.runcheck'):
@@ -750,10 +749,8 @@
logger.debug('Using public key: %s' % (logger.colors.underline + self.onionrCore._crypto.pubKey))
time.sleep(1)
# TODO: make runable on windows
- #communicatorProc = subprocess.Popen([communicatorDaemon, 'run', str(net.socksPort)])
self.onionrCore.torPort = net.socksPort
- communicatorThread = Thread(target=communicator2.startCommunicator, args=(self, str(net.socksPort)))
+ communicatorThread = Thread(target=communicator.startCommunicator, args=(self, str(net.socksPort)))
communicatorThread.start()
while self.communicatorInst is None:
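
This hunk also replaces the old subprocess launch of communicator2.py with an in-process thread, then busy-waits until the daemon registers itself. A sketch of that startup handshake with stand-in names:

import time
from threading import Thread

class AppStub:
    communicatorInst = None

def start_communicator(shared, socks_port):
    # the real daemon assigns itself here before entering its main loop
    shared.communicatorInst = object()

app = AppStub()
Thread(target=start_communicator, args=(app, '9050')).start()
while app.communicatorInst is None:
    time.sleep(0.1)  # wait for the daemon to come up
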
@@ -940,7 +937,8 @@
logger.error('Block hash is invalid')
return
- Block.mergeChain(bHash, fileName)
+ with open(fileName, 'wb') as myFile:
+ myFile.write(base64.b64decode(Block(bHash, core=self.onionrCore).bcontent))
return
def addWebpage(self):
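
Single-block file retrieval now just base64-decodes the block body straight to disk instead of walking a chain with mergeChain. Since block bodies are stored base64-encoded, the decode step is what recovers the original bytes; a minimal sketch:

import base64

def save_block_to_file(block_content_b64, file_name):
    # block content is stored base64-encoded; decode before writing
    with open(file_name, 'wb') as my_file:
        my_file.write(base64.b64decode(block_content_b64))
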
@@ -963,12 +961,9 @@
return
logger.info('Adding file... this might take a long time.')
try:
- if singleBlock:
with open(filename, 'rb') as singleFile:
blockhash = self.onionrCore.insertBlock(base64.b64encode(singleFile.read()), header=blockType)
- else:
- blockhash = Block.createChain(file = filename)
- logger.info('File %s saved in block %s.' % (filename, blockhash))
+ logger.info('File %s saved in block %s' % (filename, blockhash))
except:
logger.error('Failed to save file in block.', timestamp = False)
else:
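
With createChain gone, addFile keeps only the single-block path: read the file as bytes, base64-encode it, and insert one block. A minimal sketch of that path, where insert_block is a hypothetical stand-in for Core.insertBlock:

import base64

def add_file_single_block(filename, insert_block, block_type='bin'):
    # encode to base64 so arbitrary binary bytes survive block handling
    with open(filename, 'rb') as single_file:
        return insert_block(base64.b64encode(single_file.read()), header=block_type)
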


@@ -1,7 +1,7 @@
'''
Onionr - P2P Anonymous Storage Network
- This class contains the OnionrBlocks class which is a class for working with Onionr blocks
+ This file contains the OnionrBlocks class which is a class for working with Onionr blocks
'''
'''
This program is free software: you can redistribute it and/or modify
@@ -56,18 +56,7 @@ class Block:
if self.getCore() is None:
self.core = onionrcore.Core()
- # update the blocks' contents if it exists
- if not self.getHash() is None:
- if not self.core._utils.validateHash(self.hash):
- logger.debug('Block hash %s is invalid.' % self.getHash())
- raise onionrexceptions.InvalidHexHash('Block hash is invalid.')
- elif not self.update():
- logger.debug('Failed to open block %s.' % self.getHash())
- else:
- pass
- #logger.debug('Did not update block.')
# logic
+ self.update()
def decrypt(self, anonymous = True, encodedData = True):
'''
@@ -140,13 +129,15 @@
Outputs:
- (bool): indicates whether or not the operation was successful
'''
try:
# import from string
blockdata = data
# import from file
if blockdata is None:
+ blockdata = onionrstorage.getData(self.core, self.getHash()).decode()
'''
filelocation = file
readfile = True
@@ -169,6 +160,7 @@
#blockdata = f.read().decode()
self.blockFile = filelocation
'''
else:
self.blockFile = None
# parse block
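
The rewritten update() prefers data passed in by the caller and only falls back to reading the block from storage (the old file-based loading is left commented out above). A minimal sketch of that fallback, with storage_get standing in for onionrstorage.getData:

def resolve_block_data(data, block_hash, storage_get):
    # use the caller-supplied data if present, else load from storage
    if data is None:
        data = storage_get(block_hash).decode()
    return data
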
@@ -588,7 +580,7 @@
return list()
- def mergeChain(child, file = None, maximumFollows = 32, core = None):
+ def mergeChain(child, file = None, maximumFollows = 1000, core = None):
'''
Follows a child Block to its root parent Block, merging content
@@ -635,7 +627,7 @@
blocks.append(block.getHash())
- buffer = ''
+ buffer = b''
# combine block contents
for hash in blocks:
@@ -644,101 +636,17 @@
contents = base64.b64decode(contents.encode())
if file is None:
- buffer += contents.decode()
+ try:
+ buffer += contents.encode()
+ except AttributeError:
+ buffer += contents
else:
file.write(contents)
if file is not None:
file.close()
return (None if not file is None else buffer)
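
The buffer change from '' to b'' means decoded chunk contents may arrive as either str or bytes; the new try/except absorbs both. A small sketch of that str-or-bytes tolerant append:

def append_chunk(buffer, contents):
    # contents may be str (has .encode) or already bytes
    try:
        return buffer + contents.encode()
    except AttributeError:
        return buffer + contents
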
- def createChain(data = None, chunksize = 99800, file = None, type = 'chunk', sign = True, encrypt = False, verbose = False):
- '''
- Creates a chain of blocks to store larger amounts of data
- The chunksize is set to 99800 because it provides the least amount of PoW for the most amount of data.
- Inputs:
- - data (*): if `file` is None, the data to be stored in blocks
- - file (file/str): the filename or file object to read from (or None to read `data` instead)
- - chunksize (int): the number of bytes per block chunk
- - type (str): the type header for each of the blocks
- - sign (bool): whether or not to sign each block
- - encrypt (str): the public key to encrypt to, or False to disable encryption
- - verbose (bool): whether or not to return a tuple containing more info
- Outputs:
- - if `verbose`:
- - (tuple):
- - (str): the child block hash
- - (list): all block hashes associated with storing the file
- - if not `verbose`:
- - (str): the child block hash
- '''
- blocks = list()
- # initial datatype checks
- if data is None and file is None:
- return blocks
- elif not (file is None or (isinstance(file, str) and os.path.exists(file))):
- return blocks
- elif isinstance(file, str):
- file = open(file, 'rb')
- if not isinstance(data, str):
- data = str(data)
- if not file is None:
- filesize = os.stat(file.name).st_size
- offset = filesize % chunksize
- maxtimes = int(filesize / chunksize)
- for times in range(0, maxtimes + 1):
- # read chunksize bytes from the file (end -> beginning)
- if times < maxtimes:
- file.seek(- ((times + 1) * chunksize), 2)
- content = file.read(chunksize)
- else:
- file.seek(0, 0)
- content = file.read(offset)
- # encode it- python is really bad at handling certain bytes that
- # are often present in binaries.
- content = base64.b64encode(content).decode()
- # if it is the end of the file, exit
- if not content:
- break
- # create block
- block = Block()
- block.setType(type)
- block.setContent(content)
- block.setParent((blocks[-1] if len(blocks) != 0 else None))
- hash = block.save(sign = sign)
- # remember the hash in cache
- blocks.append(hash)
- elif not data is None:
- for content in reversed([data[n:n + chunksize] for n in range(0, len(data), chunksize)]):
- # encode chunk with base64
- content = base64.b64encode(content.encode()).decode()
- # create block
- block = Block()
- block.setType(type)
- block.setContent(content)
- block.setParent((blocks[-1] if len(blocks) != 0 else None))
- hash = block.save(sign = sign)
- # remember the hash in cache
- blocks.append(hash)
- # return different things depending on verbosity
- if verbose:
- return (blocks[-1], blocks)
- file.close()
- return blocks[-1]
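
For reference, the removed createChain read the file in chunks from the end toward the beginning, so that each newly created block could name the previously created block, which holds the following chunk, as its parent. A sketch of that reverse chunking using only the standard library:

import os

def reverse_chunks(path, chunksize):
    # yield the last chunk first so blocks can be chained child -> parent
    size = os.stat(path).st_size
    remainder = size % chunksize
    with open(path, 'rb') as f:
        for i in range(size // chunksize):
            f.seek(-(i + 1) * chunksize, 2)
            yield f.read(chunksize)
        if remainder:
            f.seek(0, 0)
            yield f.read(remainder)
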
def exists(bHash):
'''
Checks if a block is saved to file or not
@@ -799,7 +707,7 @@
if block.getHash() in Block.getCache() and not override:
return False
- # dump old cached blocks if the size exeeds the maximum
+ # dump old cached blocks if the size exceeds the maximum
if sys.getsizeof(Block.blockCacheOrder) >= config.get('allocations.block_cache_total', 50000000): # 50MB default cache size
del Block.blockCache[blockCacheOrder.pop(0)]


@@ -34,7 +34,7 @@ class DaemonTools:
'''Announce our node to our peers'''
retData = False
announceFail = False
- if config.get('general.security_level') == 0:
+ if self.daemon._core.config('general.security_level') == 0:
# Announce to random online peers
for i in self.daemon.onlinePeers:
if not i in self.announceCache:
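
Announcement is gated on security level 0 and skips peers already in announceCache, so each peer is announced to at most once per run. A sketch of that de-duplication with stand-in names:

def announce_to_peers(online_peers, announce_cache, announce):
    # only announce to peers we have not contacted before
    for peer in online_peers:
        if peer not in announce_cache:
            if announce(peer):
                announce_cache.append(peer)
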


@@ -53,6 +53,9 @@ class BlacklistedBlock(Exception):
class DataExists(Exception):
pass
+ class NoDataAvailable(Exception):
+ pass
class InvalidHexHash(Exception):
'''When a string is not a valid hex string of appropriate length for a hash value'''
pass

onionr/onionrfragment.py (new file, 73 additions)

@@ -0,0 +1,73 @@
'''
Onionr - P2P Anonymous Storage Network
This file contains the OnionrFragment class which implements the fragment system
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
# URI format: onionr:<30-char truncated block hash><30-char truncated block hash>...k<hex decryption key>
import core, sys, binascii, os
FRAGMENT_SIZE = 0.25
TRUNCATE_LENGTH = 30
class OnionrFragment:
    def __init__(self, uri=None):
        # parse a fragment URI back into its truncated block hashes and key
        uri = uri.replace('onionr:', '')
        count = 0
        blocks = []
        appendData = ''
        key = ''
        for x in uri:
            if x == 'k':
                # everything after the 'k' separator is the hex decryption key;
                # truncated hashes are hex, so 'k' cannot occur before it
                key = uri[uri.index('k') + 1:]
                break
            appendData += x
            count += 1
            if count == TRUNCATE_LENGTH:
                # collected one full truncated block hash
                blocks.append(appendData)
                appendData = ''
                count = 0
        self.key = key
        self.blocks = blocks

    @staticmethod
    def generateFragments(data=None, coreInst=None):
        if coreInst is None:
            coreInst = core.Core()
        key = os.urandom(32)
        data = coreInst._crypto.symmetricEncrypt(data, key).decode()
        blocks = []
        blockData = b""
        uri = "onionr:"
        for x in data:
            blockData += x.encode()
            # insert a block once the buffer passes the fragment size ratio
            if round(len(blockData) / len(data), 3) > FRAGMENT_SIZE:
                blocks.append(coreInst.insertBlock(blockData))
                blockData = b""
        if blockData != b"":
            # flush the final partial fragment so no data is lost
            blocks.append(coreInst.insertBlock(blockData))
        for bl in blocks:
            uri += bl[:TRUNCATE_LENGTH]
        uri += "k"
        uri += binascii.hexlify(key).decode()
        return (uri, key)

if __name__ == '__main__':
    uri = OnionrFragment.generateFragments("test")[0]
    print(uri)
    OnionrFragment(uri)
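
The truncated hashes in a fragment URI identify blocks only by prefix, so retrieval presumably matches them against the full hashes a node already knows; this commit does not include that side yet. A hedged sketch of such a prefix lookup (known_hashes stands in for the node's block list):

def find_fragment_blocks(truncated_hashes, known_hashes):
    # match each 30-character prefix against full block hashes
    found = []
    for prefix in truncated_hashes:
        for full in known_hashes:
            if full.startswith(prefix):
                found.append(full)
                break
    return found
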


@@ -21,6 +21,13 @@ import core, sys, sqlite3, os, dbcreator
DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
+ class BlockCache:
+ def __init__(self):
+ self.blocks = {}
+ def cleanCache(self):
+ while sys.getsizeof(self.blocks) > 100000000:
+ self.blocks.pop(list(self.blocks.keys())[0])
def dbCreate(coreInst):
try:
dbcreator.DBCreator(coreInst).createBlockDataDB()
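
The cleanCache method above evicts the oldest-inserted entries until the cache's reported size drops below 100 MB. Note that sys.getsizeof on a dict measures only the dict structure itself, not the cached block data it holds, so the real memory budget is looser than the constant suggests. A sketch of the same FIFO eviction in isolation:

import sys

def evict_until_under(cache, limit=100000000):
    # pop insertion-ordered keys until the dict's own footprint fits
    while sys.getsizeof(cache) > limit and len(cache) > 0:
        cache.pop(next(iter(cache)))
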
@@ -62,6 +69,7 @@ def store(coreInst, data, blockHash=''):
else:
with open('%s/%s.dat' % (coreInst.blockDataLocation, blockHash), 'wb') as blockFile:
blockFile.write(data)
+ coreInst.blockCache.cleanCache()
def getData(coreInst, bHash):
assert isinstance(coreInst, core.Core)