sync and onionrservices fixes

Kevin Froman 2019-06-29 13:18:31 -05:00
parent d70afbf92b
commit cff38cb7c2
6 changed files with 24 additions and 17 deletions

View File

@@ -33,11 +33,17 @@ from onionrutils import bytesconverter, stringvalidators, epoch, mnemonickeys
config.reload()
class FDSafeHandler(WSGIHandler):
'''Our WSGI handler. Doesn't do much non-default except timeouts'''
def __init__(self, sock, address, server, rfile=None):
self.socket = sock
self.address = address
self.server = server
self.rfile = rfile
def handle(self):
timeout = Timeout(60, exception=Exception)
timeout.start()
while True:
timeout = Timeout(120, exception=Exception)
try:
WSGIHandler.handle(self)
FDSafeHandler.handle(self)
timeout.start()
except Timeout as ex:
raise

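This hunk gives FDSafeHandler an explicit __init__ and moves the request timeout to 120 seconds. A minimal sketch of the idea, assuming gevent's WSGIHandler and Timeout; delegating to the parent initializer and to WSGIHandler.handle (rather than re-entering FDSafeHandler.handle) is an assumption here, and the committed loop structure may differ:

from gevent import Timeout
from gevent.pywsgi import WSGIHandler

class FDSafeHandler(WSGIHandler):
    '''Give up on any request that stays stuck for more than 120 seconds.'''
    def __init__(self, sock, address, server, rfile=None):
        # keep our own references, then let gevent do its normal setup
        self.socket = sock
        self.address = address
        WSGIHandler.__init__(self, sock, address, server, rfile)

    def handle(self):
        timeout = Timeout(120, exception=Exception)  # raises in this greenlet after 120s
        timeout.start()
        try:
            WSGIHandler.handle(self)  # the stock gevent handler does the real work
        finally:
            timeout.cancel()  # assumption: disarm the timer once handling finishes
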
View File

@@ -80,19 +80,21 @@ def download_blocks_from_communicator(comm_inst):
logger.info('Attempting to save block %s...' % blockHash[:12])
try:
comm_inst._core.setData(content)
except onionrexceptions.DataExists:
logger.warn('Data is already set for %s ' % (blockHash,))
except onionrexceptions.DiskAllocationReached:
logger.error('Reached disk allocation allowance, cannot save block %s.' % blockHash)
logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
removeFromQueue = False
else:
comm_inst._core.addToBlockDB(blockHash, dataSaved=True)
blockmetadata.process_block_metadata(comm_inst._core, blockHash) # caches block metadata values to block database
else:
logger.warn('POW failed for block %s.' % blockHash)
logger.warn('POW failed for block %s.' % (blockHash,))
else:
if comm_inst._core._blacklist.inBlacklist(realHash):
logger.warn('Block %s is blacklisted.' % (realHash,))
else:
logger.warn('Metadata for block %s is invalid.' % blockHash)
logger.warn('Metadata for block %s is invalid.' % (blockHash,))
comm_inst._core._blacklist.addToDB(blockHash)
else:
# if block didn't meet expected hash

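Besides catching the new onionrexceptions.DataExists (raised by set_data in the last file of this commit), every log call here now formats the hash through a one-element tuple. With a plain string both forms behave the same, but the tuple form stays safe if the value is ever itself a tuple; a quick illustration with a hypothetical value:

value = ('deadbeef', 'cafebabe')   # hypothetical: a tuple slips through
'%s' % (value,)                    # -> "('deadbeef', 'cafebabe')"
'%s' % value                       # -> TypeError: not all arguments converted
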
View File

@@ -21,7 +21,7 @@ import logger
from onionrutils import stringvalidators
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
logger.info('Looking up new addresses...', terminal=True)
tryAmount = 1
newPeers = []
for i in range(tryAmount):

View File

@@ -18,12 +18,11 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import communicator, onionrblockapi
from onionrutils import stringvalidators
from onionrutils import stringvalidators, bytesconverter
def service_creator(daemon):
assert isinstance(daemon, communicator.OnionrCommunicatorDaemon)
core = daemon._core
utils = core._utils
# Find socket connection blocks
# TODO cache blocks and only look at recently received ones
@@ -31,9 +30,9 @@ def service_creator(daemon):
for b in con_blocks:
if not b in daemon.active_services:
bl = onionrblockapi.Block(b, core=core, decrypt=True)
bs = utils.bytesToStr(bl.bcontent) + '.onion'
bs = bytesconverter.bytes_to_str(bl.bcontent) + '.onion'
if stringvalidators.validate_pub_key(bl.signer) and stringvalidators.validate_transport(bs):
signer = utils.bytesToStr(bl.signer)
signer = bytesconverter.bytes_to_str(bl.signer)
daemon.active_services.append(b)
daemon.active_services.append(signer)
daemon.services.create_server(signer, bs)

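The services code stops reaching through core._utils and calls the standalone onionrutils.bytesconverter module instead. A rough sketch of the contract these call sites rely on (not the module's actual source):

def bytes_to_str(data):
    '''Return data as str, decoding UTF-8 when it arrives as bytes.'''
    try:
        return data.decode('utf-8')
    except AttributeError:
        return data  # already a str

With that contract, bytes_to_str(bl.bcontent) + '.onion' yields a usable address whether the block content comes back as bytes or str.
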
View File

@@ -24,7 +24,7 @@ import core, logger, httpapi
import onionrexceptions
from netcontroller import getOpenPort
import api
from onionrutils import stringvalidators, basicrequests
from onionrutils import stringvalidators, basicrequests, bytesconverter
from . import httpheaders
class ConnectionServer:
@@ -82,7 +82,7 @@ class ConnectionServer:
raise ConnectionError('Could not reach %s bootstrap address %s' % (peer, address))
else:
# If no connection error, create the service and save it to local global key store
self.core_inst.keyStore.put('dc-' + response.service_id, self.core_inst._utils.bytesToStr(peer))
self.core_inst.keyStore.put('dc-' + response.service_id, bytesconverter.bytes_to_str(peer))
logger.info('hosting on %s with %s' % (response.service_id, peer))
http_server.serve_forever()
http_server.stop()

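Same bytesconverter swap in the connection server, applied to the peer key recorded against the freshly created hidden service. With hypothetical values, the 'dc-' entry ends up looking like this:

key_store = {}                                # stand-in for core_inst.keyStore
service_id = 'exampleonionserviceid'          # hypothetical response.service_id
peer = b'EXAMPLEPEERPUBLICKEY'                # hypothetical peer public key (bytes)
key_store['dc-' + service_id] = peer.decode('utf-8')  # what bytes_to_str amounts to here
# key_store == {'dc-exampleonionserviceid': 'EXAMPLEPEERPUBLICKEY'}
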
View File

@@ -31,6 +31,6 @@ def set_data(core_inst, data):
else:
raise onionrexceptions.DiskAllocationReached
else:
raise Exception("Data is already set for " + dataHash)
raise onionrexceptions.DataExists("Data is already set for " + dataHash)
return dataHash
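
This is the other half of the downloader change above: raising a typed exception instead of a bare Exception lets callers tell "block already stored" apart from real failures. A self-contained sketch, assuming DataExists is a plain Exception subclass in onionrexceptions:

class DataExists(Exception):
    pass

def set_data_example(store, data_hash, data):
    '''Toy stand-in for set_data: refuse to overwrite an existing block.'''
    if data_hash in store:
        raise DataExists('Data is already set for ' + data_hash)
    store[data_hash] = data
    return data_hash

store = {}
set_data_example(store, 'abc123', b'block bytes')
try:
    set_data_example(store, 'abc123', b'block bytes')
except DataExists:
    pass  # duplicate download; safe to ignore, as the communicator now does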