progress in removing core
commit 3097407774
parent 274505a51f
@@ -20,13 +20,13 @@
 import onionrexceptions, logger
 from onionrutils import validatemetadata, blockmetadata
 from coredb import blockmetadb
-import onionrcrypto, onionrblacklist, onionrstorage
+import onionrblacklist, onionrstorage
+import onionrcrypto as crypto
 def importBlockFromData(content):
-    crypto = onionrcrypto.OnionrCrypto()
     blacklist = onionrblacklist.OnionrBlackList()
     retData = False
 
-    dataHash = crypto.sha3Hash(content)
+    dataHash = crypto.hashers.sha3_hash(content)
 
     if blacklist.inBlacklist(dataHash):
         raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,))
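
The first hunk replaces the per-call OnionrCrypto() instance with the module-level onionrcrypto package. A minimal sketch of the new hashing call, assuming only what the hunk shows (crypto.hashers.sha3_hash); the input bytes are illustrative:

    import onionrcrypto as crypto

    # Hash raw block content the way importBlockFromData now does.
    content = b'example block content'  # illustrative payload, not real block data
    data_hash = crypto.hashers.sha3_hash(content)
    print(data_hash)
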
@@ -39,10 +39,10 @@ def importBlockFromData(content):
     metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
     metadata = metas[0]
     if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid
-        if crypto.verifyPow(content): # check if POW is enough/correct
+        if crypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
             logger.info('Block passed proof, saving.', terminal=True)
             try:
-                blockHash = onionrstorage.setdata(content)
+                blockHash = onionrstorage.set_data(content)
             except onionrexceptions.DiskAllocationReached:
                 pass
             else:
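
Taken together with the previous hunk, the import path is now: parse metadata, validate it, verify proof of work, then store through the package-level onionrstorage.set_data. A condensed sketch of that flow, not the real function body; the three-element indexing follows the metas[2] access used above, and the exception handling mirrors the hunk:

    import onionrexceptions, onionrstorage
    import onionrcrypto as crypto
    from onionrutils import blockmetadata, validatemetadata

    def import_block_sketch(content):
        '''Condensed view of the new import path (illustrative only).'''
        metas = blockmetadata.get_block_metadata_from_data(content)
        if not validatemetadata.validate_metadata(metas[0], metas[2]):
            return False
        if not crypto.cryptoutils.verify_POW(content):
            return False
        try:
            return onionrstorage.set_data(content)  # block hash on success
        except onionrexceptions.DiskAllocationReached:
            return False
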
@@ -21,6 +21,7 @@ import base64
 import onionrproofs, logger
 from etc import onionrvalues
 from onionrutils import basicrequests, bytesconverter
+from utils import gettransports
 from communicator import onlinepeers
 from coredb import keydb
 def announce_node(daemon):
@@ -43,7 +44,10 @@ def announce_node(daemon):
         peer = onlinepeers.pick_online_peer(daemon)
 
         for x in range(1):
-            ourID = daemon.hsAddress
+            try:
+                ourID = gettransports.get()[0]
+            except IndexError:
+                break
 
             url = 'http://' + peer + '/announce'
             data = {'node': ourID}
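
announce_node stops reading daemon.hsAddress and instead asks gettransports for the node's own address, breaking out if none has been published yet. The guard matters because gettransports.get() can return an empty list before the Tor/I2P service exists, so indexing [0] would raise IndexError. The same guard in isolation (the helper name is made up):

    from utils import gettransports

    def first_transport_or_none():
        # An empty transport list (hidden service not yet published) must not crash the caller.
        try:
            return gettransports.get()[0]
        except IndexError:
            return None
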
@@ -57,7 +57,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         if not config.get('tor.v3onions') and len(address) == 62:
             continue
         # Don't connect to our own address
-        if address == comm_inst.hsAddress:
+        if address in transports:
             continue
         # Don't connect to invalid address or if its already been tried/connected, or if its cooled down
         if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
@@ -78,7 +78,7 @@ def download_blocks_from_communicator(comm_inst):
         if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
             logger.info('Attempting to save block %s...' % blockHash[:12])
             try:
-                onionrstorage.setdata.set_data(content)
+                onionrstorage.set_data(content)
             except onionrexceptions.DataExists:
                 logger.warn('Data is already set for %s ' % (blockHash,))
             except onionrexceptions.DiskAllocationReached:
@@ -22,7 +22,7 @@ import logger
 from onionrusers import onionrusers
 from onionrutils import epoch
 from coredb import blockmetadb, dbfiles
-from onionrstorage import removeblock, setdata
+from onionrstorage import removeblock
 def clean_old_blocks(comm_inst):
     '''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
 
@@ -21,13 +21,12 @@ import logger
 from onionrutils import stringvalidators
 from communicator import peeraction, onlinepeers
 from utils import gettransports
-transports = gettransports.get()
 def lookup_new_peer_transports_with_communicator(comm_inst):
     logger.info('Looking up new addresses...')
     tryAmount = 1
     newPeers = []
-    if len(transports) == 0:
-        transports = list(gettransports.get())
+    transports = gettransports.get()
     for i in range(tryAmount):
         # Download new peer address list from random online peers
         if len(newPeers) > 10000:
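
This hunk removes both the module-level transports snapshot and the lazy refill inside the function. Capturing the list at import time freezes whatever gettransports returned then, which is typically empty before the hidden service is created, and the removed refill was also broken in plain Python terms: assigning to transports inside the function makes the name local, so the len(transports) check would raise UnboundLocalError before the refill ever ran. Re-reading inside the function avoids both problems. A minimal illustration of the two patterns (names are made up):

    from utils import gettransports

    # Pattern removed by this commit: captured once, at import time.
    TRANSPORTS_AT_IMPORT = gettransports.get()

    def current_transports():
        # Pattern adopted instead: ask on every call, so late-published addresses appear.
        return gettransports.get()
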
@@ -45,7 +45,7 @@ def upload_blocks_from_communicator(comm_inst):
             data = {'block': block.Block(bl).getRaw()}
             proxyType = proxypicker.pick_proxy(peer)
             logger.info("Uploading block to " + peer, terminal=True)
-            if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
+            if not basicrequests.do_post_request(comm_inst.onionrInst, url, data=data, proxyType=proxyType) == False:
                 localcommand.local_command('waitforshare/' + bl, post=True)
                 finishedUploads.append(bl)
     for x in finishedUploads:
@@ -21,6 +21,7 @@ import sqlite3
 from . import expiredblocks, updateblockinfo, add
 from .. import dbfiles
 update_block_info = updateblockinfo.update_block_info
+add_to_block_DB = add.add_to_block_DB
 def get_block_list(dateRec = None, unsaved = False):
     '''
     Get list of our blocks
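
The blockmetadb package now re-exports add.add_to_block_DB alongside update_block_info, so callers can use the package namespace directly instead of importing the submodule. Hypothetical usage, assuming the function keeps taking a block hash as its first argument:

    from coredb import blockmetadb

    # Previously callers needed: from coredb.blockmetadb import add
    blockmetadb.add_to_block_DB('exampleblockhashvalue')  # hypothetical hash string
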
@@ -25,14 +25,12 @@ class PublicAPISecurity:
     def __init__(self, public_api):
         public_api_security_bp = Blueprint('publicapisecurity', __name__)
         self.public_api_security_bp = public_api_security_bp
-        transports = gettransports.get()
 
         @public_api_security_bp.before_app_request
         def validate_request():
             '''Validate request has the correct hostname'''
             # If high security level, deny requests to public (HS should be disabled anyway for Tor, but might not be for I2P)
-            if len(transports) == 0:
-                transports = list(gettransports.get())
+            transports = gettransports.get()
             if public_api.config.get('general.security_level', default=1) > 0:
                 abort(403)
             if request.host not in transports:
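
PublicAPISecurity gets the same treatment: the transport list is read inside the before_app_request hook rather than captured in __init__, so transports that appear after startup are still honored on each request. A stripped-down sketch of just that hook (the security-level check from the hunk is omitted here):

    from flask import Blueprint, abort, request

    from utils import gettransports

    public_api_security_bp = Blueprint('publicapisecurity', __name__)

    @public_api_security_bp.before_app_request
    def validate_request():
        '''Reject requests whose Host header is not one of our own transports.'''
        transports = gettransports.get()  # re-read on every request
        if request.host not in transports:
            abort(403)
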
@@ -124,7 +124,7 @@ def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asym
     payload = onionrproofs.POW(metadata, data).waitForResult()
     if payload != False:
         try:
-            retData = onionrstorage.setdata.set_data(payload)
+            retData = onionrstorage.set_data(payload)
         except onionrexceptions.DiskAllocationReached:
             logger.error(allocationReachedMessage)
             retData = False
@@ -22,8 +22,11 @@ from onionrutils import bytesconverter, stringvalidators
 from coredb import dbfiles
 import filepaths, onionrcrypto, dbcreator, onionrexceptions
 from onionrcrypto import hashers
+from . import setdata
 DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
 
+set_data = setdata.set_data
 
 def _dbInsert(blockHash, data):
     conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
     c = conn.cursor()
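
Finally, the onionrstorage package imports its setdata submodule and aliases setdata.set_data as set_data, which is exactly what the communicator, importer, and insert_block hunks above switch to. Illustrative usage, with a placeholder payload:

    import onionrstorage

    # Both spellings now resolve to the same function:
    #   onionrstorage.setdata.set_data(content)
    #   onionrstorage.set_data(content)
    block_hash = onionrstorage.set_data(b'example content')  # placeholder payload
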