work on moving to vdf
parent 6a8ab46815
commit 2352e066cc
@@ -25,6 +25,7 @@ import onionrcrypto
 import onionrstorage
 from onionrblocks import onionrblacklist
 from onionrblocks import storagecounter
+from onionrproofs import vdf
 from . import shoulddownload
 """
 This program is free software: you can redistribute it and/or modify
@@ -100,12 +101,7 @@ def download_blocks_from_communicator(shared_state: "TooMany"):
             except AttributeError:
                 pass

-            realHash = onionrcrypto.hashers.sha3_hash(content)
-            try:
-                realHash = realHash.decode() # bytes on some versions for some reason
-            except AttributeError:
-                pass
-            if realHash == blockHash:
+            if vdf.verify_vdf(blockHash, content):
                 #content = content.decode() # decode here because sha3Hash needs bytes above
                 metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
                 metadata = metas[0]
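
The hunk above replaces the SHA3 hash comparison with VDF verification: under this commit a block's ID is the output of a MiMC-based verifiable delay function over its content, so validating a downloaded block means running the cheap verify side of the VDF instead of rehashing. A minimal round-trip sketch, assuming only the mimcvdf vdf_create/vdf_verify calls that appear later in this commit (the sample data and round count are illustrative):

    import mimcvdf

    block_content = b"example block data"
    rounds = len(block_content) * 1000  # mirrors rounds_for_bytes() below

    # Creating the proof is sequential and slow by design...
    block_hash_hex = mimcvdf.vdf_create(block_content, rounds)

    # ...while verifying it is comparatively cheap, which is all the
    # downloader has to do per candidate block.
    assert mimcvdf.vdf_verify(block_content, block_hash_hex, rounds)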
@@ -118,30 +114,10 @@ def download_blocks_from_communicator(shared_state: "TooMany"):
                 except onionrexceptions.DataExists:
                     metadata_validation_result = False
                 if metadata_validation_result: # check if metadata is valid, and verify nonce
-                    if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
-                        logger.info('Attempting to save block %s...' % blockHash[:12])
-                        try:
-                            onionrstorage.set_data(content)
-                        except onionrexceptions.DataExists:
-                            logger.warn('Data is already set for %s ' % (blockHash,))
-                        except onionrexceptions.DiskAllocationReached:
-                            logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
-                            removeFromQueue = False
-                        else:
-                            blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
-                            blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
-                            spawn(
-                                local_command,
-                                f'/daemon-event/upload_event',
-                                post=True,
-                                is_json=True,
-                                post_data={'block': blockHash}
-                            )
-                    else:
-                        logger.warn('POW failed for block %s.' % (blockHash,))
-                else:
-                    if blacklist.inBlacklist(realHash):
-                        logger.warn('Block %s is blacklisted.' % (realHash,))
+                    save_block(blockHash, data)
+                else:
+                    if blacklist.inBlacklist(blockHash):
+                        logger.warn(f'Block {blockHash} is blacklisted.')
                 else:
                     logger.warn('Metadata for block %s is invalid.' % (blockHash,))
                     blacklist.addToDB(blockHash)
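
The save path removed above (POW check, storage write, metadata DB insert, upload-event spawn) collapses into a single save_block(blockHash, data) call whose body is not part of this diff. A hypothetical sketch of the shape such a helper could take, reconstructed only from the removed lines (the module names are assumed from the surrounding file, not confirmed by this commit):

    # Hypothetical reconstruction, not the actual save_block from this commit.
    import logger
    import onionrexceptions
    import onionrstorage
    from coredb import blockmetadb
    from onionrblocks import blockmetadata

    def save_block(block_hash, data):
        try:
            onionrstorage.set_data(data)
        except onionrexceptions.DataExists:
            logger.warn('Data is already set for %s' % (block_hash,))
        except onionrexceptions.DiskAllocationReached:
            logger.error('Disk allocation reached, cannot save block %s.' % (block_hash,))
        else:
            blockmetadb.add_to_block_DB(block_hash, dataSaved=True)
            blockmetadata.process_block_metadata(block_hash)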
@@ -154,13 +130,6 @@ def download_blocks_from_communicator(shared_state: "TooMany"):
                 pass
             # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
             onionrpeers.PeerProfiles(peerUsed).addScore(-50)
-            if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
-                # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
-                logger.warn(
-                    'Block hash validation failed for ' +
-                    blockHash + ' got ' + tempHash)
-            else:
-                removeFromQueue = False  # Don't remove from queue if 404
         if removeFromQueue:
             try:
                 del kv.get('blockQueue')[blockHash]  # remove from block queue both if success or false
src/db/__init__.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+import dbm
+import time
+import os
+
+timeout = 120
+
+
+def _do_timeout(func, *args):
+    ts = 0
+    res = None
+    while True:
+        try:
+            res = func(*args)
+        except dbm.error:
+            if not ts:
+                ts = time.time()
+                continue
+            if time.time() - ts > timeout:
+                raise TimeoutError()
+            time.sleep(0.1)
+        else:
+            return res
+
+
+def set(db_path, key, value):
+    def _set(key, value):
+        with dbm.open(db_path, "c") as my_db:
+            my_db[key] = value
+    _do_timeout(_set, key, value)
+
+
+def get(db_path, key):
+    def _get(key):
+        with dbm.open(db_path, "c") as my_db:
+            return my_db[key]
+    return _do_timeout(_get, key)
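
The new src/db module serializes access to a dbm file across processes by brute force: dbm backends such as gdbm hold an exclusive write lock, so a concurrent open raises dbm.error, and _do_timeout retries every 0.1 s for up to 120 s before raising TimeoutError. The result is a flat key/value API keyed by file path. A usage sketch (the path is illustrative; the block_database path added to filepaths below is presumably the intended consumer):

    import db  # the src/db package added above

    db_path = "/tmp/example-blocks.db"

    db.set(db_path, "block-hash-hex", b"serialized block bytes")
    print(db.get(db_path, "block-hash-hex"))  # b'serialized block bytes'

Each set/get opens and closes the database, so the lock is held only briefly per operation.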
@@ -36,3 +36,5 @@ log_file = home + 'onionr.log'
 ephemeral_services_file = home + 'ephemeral-services.list'

 restarting_indicator = home + "is-restarting"
+
+block_database = home + "blocks.db"
@@ -84,7 +84,7 @@ class StorageCounter:
         new_amount = amount + self.amount
         ret_data = new_amount
         if new_amount > config.get('allocations.disk', 2000000000):
-            ret_data = False
+            ret_data = 0
         else:
             self._update(new_amount)
         return ret_data
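
Returning 0 rather than False keeps add_bytes numeric while remaining falsy, which is what lets the caller in set_data (below) shorten "if storage_counter.add_bytes(dataSize) is not False:" to a plain truthiness test. A standalone sketch of the contract (the free function and limit default are illustrative, not the class API):

    def add_bytes(amount: int, current: int, limit: int = 2000000000) -> int:
        new_amount = current + amount
        if new_amount > limit:
            return 0  # over the disk allocation: falsy, but still an int
        return new_amount  # accepted: the new running total, always truthy

    if add_bytes(512, current=1024):
        print("block fits within the allocation, store it")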
@ -12,6 +12,7 @@ from onionrutils import bytesconverter
|
|||||||
from onionrcrypto import hashers
|
from onionrcrypto import hashers
|
||||||
|
|
||||||
from .blocknoncestart import BLOCK_NONCE_START_INT
|
from .blocknoncestart import BLOCK_NONCE_START_INT
|
||||||
|
from .vdf import create_vdf
|
||||||
"""
|
"""
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU General Public License as published by
|
it under the terms of the GNU General Public License as published by
|
||||||
|
@@ -7,9 +7,37 @@ def _wrap_vdf_create(queue, block_data_bytes, rounds):
     queue.put(mimcvdf.vdf_create(block_data_bytes, rounds))


-def do_vdf(block_data_bytes):
+def _wrap_vdf_verify(queue, block_data_bytes, block_hash_hex, rounds):
+    queue.put(mimcvdf.vdf_verify(block_data_bytes, block_hash_hex, rounds))
+
+
+def rounds_for_bytes(byte_count: int):
+    return byte_count * 1000
+
+
+def create_vdf(block_data_bytes):
+    rounds = rounds_for_bytes(len(block_data_bytes))
     queue = multiprocessing.Queue()
-    vdf_proc = multiprocessing.Process(target=_wrap_vdf_create, args=(queue, block_data_bytes, 1000))
+    vdf_proc = multiprocessing.Process(
+        target=_wrap_vdf_create,
+        args=(queue, block_data_bytes, rounds))
     vdf_proc.start()
     vdf_proc.join()
     return queue.get()
+
+
+def verify_vdf(block_hash_hex, block_data_bytes):
+    rounds = rounds_for_bytes(len(block_data_bytes))
+    if rounds > 10 ** 6:
+        # Past a million rounds it starts to take long enough to warrant a subprocess
+        queue = multiprocessing.Queue()
+        vdf_proc = multiprocessing.Process(
+            target=_wrap_vdf_verify,
+            args=(queue, block_data_bytes, block_hash_hex, rounds))
+        vdf_proc.start()
+        vdf_proc.join()
+        return queue.get()
+    return mimcvdf.vdf_verify(block_data_bytes, block_hash_hex, rounds)
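
rounds_for_bytes scales VDF difficulty linearly with block size (1000 MiMC rounds per byte), so larger blocks cost proportionally more to create. Creation always runs in a child process; verification only pays that overhead past a million rounds (blocks larger than roughly 1 KB), since below that it returns quickly enough inline. A usage sketch under those assumptions:

    from onionrproofs import vdf

    data = b"x" * 2000  # 2000 bytes -> 2,000,000 rounds

    proof_hex = vdf.create_vdf(data)        # slow: computed in a subprocess
    assert vdf.verify_vdf(proof_hex, data)  # over 1e6 rounds, also subprocessed

Joining the worker before queue.get() is safe here because the queued result is a short hex digest; the multiprocessing docs warn that this pattern can deadlock when large objects are queued.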
@@ -31,9 +31,8 @@ from onionrtypes import BlockHash
 storage_counter = storagecounter.StorageCounter()


-def set_data(data) -> BlockHash:
+def set_data(data):
     """Set the data associated with a hash."""
-    data = data
     dataSize = sys.getsizeof(data)
     nonce_hash = crypto.hashers.sha3_hash(
         bytesconverter.str_to_bytes(
@@ -50,7 +49,7 @@ def set_data(data) -> BlockHash:
     try:
         onionrstorage.getData(dataHash)
     except onionrexceptions.NoDataAvailable:
-        if storage_counter.add_bytes(dataSize) is not False:
+        if storage_counter.add_bytes(dataSize):
             onionrstorage.store(data, block_hash=dataHash)
             conn = sqlite3.connect(
                 dbfiles.block_meta_db, timeout=DATABASE_LOCK_TIMEOUT)