2020-06-30 22:08:32 +00:00
|
|
|
"""Onionr - Private P2P Communication.
|
2019-05-09 05:27:15 +00:00
|
|
|
|
2020-02-23 08:12:13 +00:00
|
|
|
Lookup new blocks with the communicator using a random connected peer
|
|
|
|
"""
|
|
|
|
from gevent import time
|
|
|
|
|
|
|
|
import logger
|
|
|
|
import onionrproofs
|
|
|
|
from onionrutils import stringvalidators, epoch
|
|
|
|
from communicator import peeraction, onlinepeers
|
2020-06-30 22:08:32 +00:00
|
|
|
from coredb.blockmetadb import get_block_list
|
2020-02-23 08:12:13 +00:00
|
|
|
from utils import reconstructhash
|
|
|
|
from onionrblocks import onionrblacklist
|
|
|
|
import onionrexceptions
|
|
|
|
import config
|
|
|
|
from etc import onionrvalues
|
2020-07-24 08:24:41 +00:00
|
|
|
from onionrblocks.storagecounter import StorageCounter
|
2020-02-23 08:12:13 +00:00
|
|
|
"""
|
2019-05-09 05:27:15 +00:00
|
|
|
This program is free software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation, either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
2020-02-23 08:12:13 +00:00
|
|
|
"""
|
2019-12-20 07:24:38 +00:00
|
|
|
|
2019-07-22 05:24:42 +00:00
|
|
|
# Shared blacklist instance; hashes found here are never queued for download
blacklist = onionrblacklist.OnionrBlackList()

# Tracks how much of the configured disk allocation is used by stored blocks
storage_counter = StorageCounter()
|
2020-02-23 08:12:13 +00:00
|
|
|
|
|
|
|
|
2019-05-08 03:28:06 +00:00
|
|
|
def lookup_blocks_from_communicator(comm_inst):
    """Ask random online peers for new block hashes and queue them.

    Picks up to `tryAmount` random online peers, requests their block list
    (only blocks newer than our last lookup of that peer, for efficiency),
    validates each returned hash, and adds unseen hashes to the shared
    download queue in the communicator's DeadSimpleKV store.

    Side effects:
        - Mutates kv 'blockQueue' (hash -> list of peers that have it)
        - Mutates kv 'dbTimestamps' (peer -> rounded epoch of last lookup)
        - Adjusts comm_inst.download_blocks_timer when new blocks are found
        - Always decrements the communicator's thread count on exit
    """
    logger.info('Looking up new blocks')
    tryAmount = 2  # number of peers to ask per invocation
    newBlocks = ''
    # List of existing saved blocks, so we don't re-queue what we have
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string("DeadSimpleKV")
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        # Stop if the download queue already has a large backlog
        if len(kv.get('blockQueue')) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(kv.get('onlinePeers')) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = kv.get('dbTimestamps')[peer]
        except KeyError:
            # Never asked this peer before: fetch everything within the
            # configured maximum block age
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst,
                peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False

        if newBlocks is not False:
            # if request was a success
            for block_hash in newBlocks.split('\n'):
                if stringvalidators.validate_hash(block_hash):
                    block_hash = reconstructhash.reconstruct_hash(block_hash)
                    # if newline separated string is valid hash

                    # if block does not exist on disk + is not already in queue
                    if block_hash not in existingBlocks:
                        if block_hash not in kv.get('blockQueue'):
                            # Require the hash to meet PoW difficulty and
                            # not be blacklisted before queueing
                            if onionrproofs.hashMeetsDifficulty(block_hash) \
                                    and not blacklist.inBlacklist(block_hash):
                                if len(kv.get('blockQueue')) <= 1000000:
                                    # add blocks to download queue
                                    kv.get('blockQueue')[block_hash] = [peer]
                                    new_block_count += 1
                                    # Remember (rounded, for privacy) when we
                                    # last got this peer's list
                                    kv.get('dbTimestamps')[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            # Already queued: record this peer as an
                            # additional source (cap at 10 per block)
                            if peer not in kv.get('blockQueue')[block_hash]:
                                if len(kv.get('blockQueue')[block_hash]) < 10:
                                    kv.get('blockQueue')[block_hash].append(
                                        peer)
    if new_block_count > 0:
        # Pluralize the log message when more than one block was found
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        # Speed up the download timer so newly discovered blocks are
        # fetched soon
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
|