progress in removing core

Kevin Froman 2019-07-18 12:40:48 -05:00
parent e69c8dbb60
commit 1775b96a04
24 changed files with 187 additions and 155 deletions

View File

@@ -20,9 +20,8 @@
import base64, os
import flask
from gevent.pywsgi import WSGIServer
import logger
from onionrutils import epoch
import httpapi
import httpapi, filepaths, logger
from . import register_private_blueprints
class PrivateAPI:
'''
@@ -41,9 +40,8 @@ class PrivateAPI:
config = onionrInst.config
self.config = config
self.debug = debug
self._core = onionrInst.onionrCore
self.startTime = epoch.get_epoch()
self._crypto = self._core._crypto
self._crypto = onionrInst.onionrCrypto
app = flask.Flask(__name__)
bindPort = int(config.get('client.client.port', 59496))
self.bindPort = bindPort
@@ -53,7 +51,7 @@
self.publicAPI = None # gets set when the thread calls our setter... bad hack but kinda necessary with flask
#threading.Thread(target=PublicAPI, args=(self,)).start()
self.host = httpapi.apiutils.setbindip.set_bind_IP(self._core.privateApiHostFile, self._core)
self.host = httpapi.apiutils.setbindip.set_bind_IP(filepaths.private_API_host_file)
logger.info('Running api on %s:%s' % (self.host, self.bindPort))
self.httpServer = ''

View File

@@ -21,7 +21,8 @@ import time
import flask
from gevent.pywsgi import WSGIServer
from httpapi import apiutils, security, fdsafehandler, miscpublicapi
import logger, onionr
import logger, onionr, filepaths
from utils import gettransports
class PublicAPI:
'''
The new client api server, isolated from the public api
@@ -32,9 +33,8 @@ class PublicAPI:
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024
self.i2pEnabled = config.get('i2p.host', False)
self.hideBlocks = [] # Blocks to be denied sharing
self.host = apiutils.setbindip.set_bind_IP(clientAPI._core.publicApiHostFile, clientAPI._core)
self.torAdder = clientAPI._core.hsAddress
self.i2pAdder = clientAPI._core.i2pAddress
self.host = apiutils.setbindip.set_bind_IP(filepaths.public_API_host_file)
self.torAdder = gettransports.get_transports()[0]
self.bindPort = config.get('client.public.port')
self.lastRequest = 0
self.hitCount = 0 # total rec requests to public api since server started
@@ -45,10 +45,6 @@ class PublicAPI:
# Set instances, then startup our public api server
clientAPI.setPublicAPIInstance(self)
while self.torAdder == '':
clientAPI._core.refreshFirstStartVars()
self.torAdder = clientAPI._core.hsAddress
time.sleep(0.1)
app.register_blueprint(security.public.PublicAPISecurity(self).public_api_security_bp)
app.register_blueprint(miscpublicapi.endpoints.PublicEndpoints(self).public_endpoints_bp)

View File

@@ -19,7 +19,7 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sys, os, time
import core, config, logger, onionr
import config, logger, onionr
import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
from . import onlinepeers
from communicatorutils import servicecreator, onionrcommunicatortimers
@@ -29,8 +29,8 @@ from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
from communicatorutils import cooldownpeer, housekeeping, netcheck
from onionrutils import localcommand, epoch
from etc import humanreadabletime
import onionrservices, onionr, onionrproofs
from coredb import daemonqueue
import onionrservices, onionr, filepaths
from coredb import daemonqueue, dbfiles
OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers
config.reload()
@@ -48,7 +48,6 @@ class OnionrCommunicatorDaemon:
# initialize core with Tor socks port being 3rd argument
self.proxyPort = proxyPort
self._core = onionrInst.onionrCore
self.blocksToUpload = []
@@ -84,7 +83,7 @@ class OnionrCommunicatorDaemon:
self.dbTimestamps = {}
# Clear the daemon queue for any dead messages
if os.path.exists(self._core.queueDB):
if os.path.exists(dbfiles.daemon_queue_db):
daemonqueue.clear_daemon_queue()
# Loads in and starts the enabled plugins
@@ -102,8 +101,8 @@ class OnionrCommunicatorDaemon:
OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)
# Timers to periodically lookup new blocks and download them
OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)
OnionrCommunicatorTimers(self, self.lookupBlocks, config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
OnionrCommunicatorTimers(self, self.getBlocks, config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)
# Timer to reset the longest offline peer so contact can be attempted again
OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58, myArgs=[self])
@@ -125,7 +124,7 @@ class OnionrCommunicatorDaemon:
# Setup direct connections
if config.get('general.socket_servers', False):
self.services = onionrservices.OnionrServices(self._core)
self.services = onionrservices.OnionrServices()
self.active_services = []
self.service_greenlets = []
OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=[self])
@@ -182,7 +181,7 @@ class OnionrCommunicatorDaemon:
else:
for server in self.service_greenlets:
server.stop()
localcommand.local_command(self._core, 'shutdown') # shutdown the api
localcommand.local_command('shutdown') # shutdown the api
time.sleep(0.5)
def lookupAdders(self):
@@ -211,7 +210,7 @@ class OnionrCommunicatorDaemon:
def peerCleanup(self):
'''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
onionrpeers.peer_cleanup(self._core)
onionrpeers.peer_cleanup()
self.decrementThreadCount('peerCleanup')
def getPeerProfileInstance(self, peer):
@@ -223,7 +222,7 @@ class OnionrCommunicatorDaemon:
break
else:
# if the peer's profile is not loaded, return a new one. connectNewPeer adds it the list on connect
retData = onionrpeers.PeerProfiles(peer, self._core)
retData = onionrpeers.PeerProfiles(peer)
return retData
def getUptime(self):
@@ -249,7 +248,7 @@ def startCommunicator(onionrInst, proxyPort):
OnionrCommunicatorDaemon(onionrInst, proxyPort)
def run_file_exists(daemon):
if os.path.isfile(daemon._core.dataDir + '.runcheck'):
os.remove(daemon._core.dataDir + '.runcheck')
if os.path.isfile(filepaths.run_check_file):
os.remove(filepaths.run_check_file)
return True
return False

View File

@@ -17,11 +17,14 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from utils import readstatic, gettransports
from coredb import keydb
bootstrap_peers = readstatic.read_static('bootstrap-nodes.txt').split(',')
def add_bootstrap_list_to_peer_list(comm_inst, peerList):
'''
Add the bootstrap list to the peer list (no duplicates)
'''
for i in comm_inst._core.bootstrapList:
if i not in peerList and i not in comm_inst.offlinePeers and i != comm_inst._core.hsAddress and len(str(i).strip()) > 0:
for i in bootstrap_peers:
if i not in peerList and i not in comm_inst.offlinePeers and i != gettransports.get_transports()[0] and len(str(i).strip()) > 0:
peerList.append(i)
comm_inst._core.addAddress(i)
keydb.addkeys.add_address(i)

View File

@@ -24,7 +24,7 @@ def get_online_peers(comm_inst):
'''
Manages the comm_inst.onlinePeers attribute list, connects to more peers if we have none connected
'''
config = comm_inst._core.config
config = comm_inst.config
logger.debug('Refreshing peer pool...')
maxPeers = int(config.get('peers.max_connect', 10))
needed = maxPeers - len(comm_inst.onlinePeers)

View File

@@ -17,6 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import secrets
def pick_online_peer(comm_inst):
'''randomly picks peer from pool without bias (using secrets module)'''
retData = ''
@@ -26,7 +27,7 @@ def pick_online_peer(comm_inst):
break
try:
# get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
retData = comm_inst.onlinePeers[comm_inst._core._crypto.secrets.randbelow(peerLength)]
retData = comm_inst.onlinePeers[secrets.randbelow(peerLength)]
except IndexError:
pass
else:

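A note on the hunk above: secrets.randbelow(n) returns a uniform integer in [0, n) from the OS CSPRNG, so the peer pick no longer needs the crypto wrapper that used to hang off Core. A minimal standalone sketch of the same selection (peer names are illustrative):

import secrets

pool = ['peer-a.onion', 'peer-b.onion', 'peer-c.onion']
peer = pool[secrets.randbelow(len(pool))]  # unbiased, cryptographically secure pick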
View File

@@ -21,6 +21,7 @@ import streamedrequests
import logger
from onionrutils import epoch, basicrequests
from . import onlinepeers
from coredb import keydb
def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_size=5242880):
'''Perform a get request to a peer'''
penalty_score = -10
@@ -30,9 +31,9 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
if len(data) > 0:
url += '&data=' + data
comm_inst._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
try:
retData = basicrequests.do_get_request(comm_inst._core, url, port=comm_inst.proxyPort, max_size=max_resp_size)
retData = basicrequests.do_get_request(url, port=comm_inst.proxyPort, max_size=max_resp_size)
except streamedrequests.exceptions.ResponseLimitReached:
logger.warn('Request failed due to max response size being overflowed', terminal=True)
retData = False
@@ -48,6 +49,6 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
except ValueError:
pass
else:
comm_inst._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
keydb.transportinfo.set_address_info(peer, 'lastConnect', epoch.get_epoch())
comm_inst.getPeerProfileInstance(peer).addScore(1)
return retData # If returnHeaders, returns tuple of data, headers. if not, just data string

View File

@@ -22,7 +22,7 @@ import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests, bytesconverter
from communicator import onlinepeers
from coredb import keydb
def announce_node(daemon):
'''Announce our node to our peers'''
ov = onionrvalues.OnionrValues()
@@ -33,7 +33,7 @@ def announce_node(daemon):
if len(daemon.announceCache) >= 10000:
daemon.announceCache.popitem()
if daemon._core.config.get('general.security_level', 0) == 0:
if daemon.config.get('general.security_level', 0) == 0:
# Announce to random online peers
for i in daemon.onlinePeers:
if not i in daemon.announceCache and not i in daemon.announceProgress:
@@ -43,18 +43,14 @@ def announce_node(daemon):
peer = onlinepeers.pick_online_peer(daemon)
for x in range(1):
if x == 1 and daemon._core.config.get('i2p.host'):
ourID = daemon._core.config.get('i2p.own_addr').strip()
else:
ourID = daemon._core.hsAddress.strip()
ourID = daemon.hsAddress
url = 'http://' + peer + '/announce'
data = {'node': ourID}
combinedNodes = ourID + peer
if ourID != 1:
#TODO: Extend existingRand for i2p
existingRand = bytesconverter.bytes_to_str(daemon._core.getAddressInfo(peer, 'powValue'))
existingRand = bytesconverter.bytes_to_str(keydb.addressinfo.get_address_info(peer, 'powValue'))
# Reset existingRand if it no longer meets the minimum POW
if type(existingRand) is type(None) or not existingRand.endswith('0' * ov.announce_pow):
existingRand = ''
@@ -77,10 +73,10 @@ def announce_node(daemon):
daemon.announceCache[peer] = data['random']
if not announceFail:
logger.info('Announcing node to ' + url)
if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
if basicrequests.do_post_request(url, data) == 'Success':
logger.info('Successfully introduced node to ' + peer, terminal=True)
retData = True
daemon._core.setAddressInfo(peer, 'introduced', 1)
daemon._core.setAddressInfo(peer, 'powValue', data['random'])
keydb.addressinfo.set_address_info(peer, 'introduced', 1)
keydb.addressinfo.set_address_info(peer, 'powValue', data['random'])
daemon.decrementThreadCount('announce_node')
return retData

View File

@@ -22,9 +22,9 @@ import onionrexceptions, logger, onionrpeers
from utils import networkmerger
from onionrutils import stringvalidators, epoch
from communicator import peeraction, bootstrappeers
from coredb import keydb
def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
config = comm_inst._core.config
config = comm_inst.config
retData = False
tried = comm_inst.offlinePeers
if peer != '':
@@ -33,10 +33,10 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
else:
raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
else:
peerList = comm_inst._core.listAdders()
peerList = keydb.listkeys.list_adders()
mainPeerList = comm_inst._core.listAdders()
peerList = onionrpeers.get_score_sorted_peer_list(comm_inst._core)
mainPeerList = keydb.listkeys.list_adders()
peerList = onionrpeers.get_score_sorted_peer_list()
# If we don't have enough peers connected or random chance, select new peers to try
if len(peerList) < 8 or secrets.randbelow(4) == 3:
@@ -56,7 +56,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
if not config.get('tor.v3onions') and len(address) == 62:
continue
# Don't connect to our own address
if address == comm_inst._core.hsAddress:
if address == comm_inst.hsAddress:
continue
# Don't connect to invalid address or if its already been tried/connected, or if its cooled down
if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
@@ -68,7 +68,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
time.sleep(0.1)
if address not in mainPeerList:
# Add a peer to our list if it isn't already since it successfully connected
networkmerger.mergeAdders(address, comm_inst._core)
networkmerger.mergeAdders(address)
if address not in comm_inst.onlinePeers:
logger.info('Connected to ' + address, terminal=True)
comm_inst.onlinePeers.append(address)
@@ -80,7 +80,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
if profile.address == address:
break
else:
comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address, comm_inst._core))
comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address))
break
else:
# Mark a peer as tried if they failed to respond to ping

View File

@@ -21,6 +21,7 @@ from onionrutils import epoch
from communicator import onlinepeers
def cooldown_peer(comm_inst):
'''Randomly add an online peer to cooldown, so we can connect a new one'''
config = comm_inst.config
onlinePeerAmount = len(comm_inst.onlinePeers)
minTime = 300
cooldownTime = 600
@@ -34,7 +35,7 @@
del comm_inst.cooldownPeer[peer]
# Cool down a peer, if we have max connections alive for long enough
if onlinePeerAmount >= comm_inst._core.config.get('peers.max_connect', 10, save = True):
if onlinePeerAmount >= config.get('peers.max_connect', 10, save = True):
finding = True
while finding:

View File

@@ -21,11 +21,12 @@ import logger
import onionrevents as events
from onionrutils import localcommand
from coredb import daemonqueue
import filepaths
def handle_daemon_commands(comm_inst):
cmd = daemonqueue.daemon_queue()
response = ''
if cmd is not False:
events.event('daemon_command', onionr = comm_inst._core.onionrInst, data = {'cmd' : cmd})
events.event('daemon_command', onionr = comm_inst.onionrInst, data = {'cmd' : cmd})
if cmd[0] == 'shutdown':
comm_inst.shutdown = True
elif cmd[0] == 'announceNode':
@@ -35,13 +36,13 @@ def handle_daemon_commands(comm_inst):
logger.debug("No nodes connected. Will not introduce node.")
elif cmd[0] == 'runCheck': # deprecated
logger.debug('Status check; looks good.')
open(comm_inst._core.dataDir + '.runcheck', 'w+').close()
open(filepaths.run_check_file, 'w+').close()
elif cmd[0] == 'connectedPeers':
response = '\n'.join(list(comm_inst.onlinePeers)).strip()
if response == '':
response = 'none'
elif cmd[0] == 'localCommand':
response = localcommand.local_command(comm_inst._core, cmd[1])
response = localcommand.local_command(cmd[1])
elif cmd[0] == 'pex':
for i in comm_inst.timers:
if i.timerFunction.__name__ == 'lookupAdders':
@@ -51,7 +52,7 @@
if cmd[0] not in ('', None):
if response != '':
localcommand.local_command(comm_inst._core, 'queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
localcommand.local_command('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
response = ''
comm_inst.decrementThreadCount('handle_daemon_commands')

View File

@@ -22,9 +22,13 @@ import logger, onionrpeers
from onionrutils import blockmetadata, stringvalidators, validatemetadata
from . import shoulddownload
from communicator import peeraction, onlinepeers
import onionrcrypto, onionrstorage, onionrblacklist, storagecounter
def download_blocks_from_communicator(comm_inst):
assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
crypto = onionrcrypto.OnionrCrypto()
blacklist = onionrblacklist.OnionrBlackList()
storage_counter = storagecounter.StorageCounter()
for blockHash in list(comm_inst.blockQueue):
if len(comm_inst.onlinePeers) == 0:
break
@@ -38,7 +42,7 @@ def download_blocks_from_communicator(comm_inst):
if not shoulddownload.should_download(comm_inst, blockHash):
continue
if comm_inst.shutdown or not comm_inst.isOnline or comm_inst._core.storage_counter.isFull():
if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.isFull():
# Exit loop if shutting down or offline, or disk allocation reached
break
# Do not download blocks being downloaded
@@ -50,7 +54,7 @@ def download_blocks_from_communicator(comm_inst):
if len(blockPeers) == 0:
peerUsed = onlinepeers.pick_online_peer(comm_inst)
else:
blockPeers = comm_inst._core._crypto.randomShuffle(blockPeers)
blockPeers = crypto.randomShuffle(blockPeers)
peerUsed = blockPeers.pop(0)
if not comm_inst.shutdown and peerUsed.strip() != '':
@@ -62,7 +66,7 @@ def download_blocks_from_communicator(comm_inst):
except AttributeError:
pass
realHash = comm_inst._core._crypto.sha3Hash(content)
realHash = crypto.sha3Hash(content)
try:
realHash = realHash.decode() # bytes on some versions for some reason
except AttributeError:
@@ -71,11 +75,11 @@ def download_blocks_from_communicator(comm_inst):
content = content.decode() # decode here because sha3Hash needs bytes above
metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
if crypto.verifyPow(content): # check if POW is enough/correct
logger.info('Attempting to save block %s...' % blockHash[:12])
try:
comm_inst._core.setData(content)
onionrstorage.setdata.set_data(content)
except onionrexceptions.DataExists:
logger.warn('Data is already set for %s ' % (blockHash,))
except onionrexceptions.DiskAllocationReached:
@@ -83,24 +87,24 @@ def download_blocks_from_communicator(comm_inst):
removeFromQueue = False
else:
blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
blockmetadata.process_block_metadata(comm_inst._core, blockHash) # caches block metadata values to block database
blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
else:
logger.warn('POW failed for block %s.' % (blockHash,))
else:
if comm_inst._core._blacklist.inBlacklist(realHash):
if blacklist.inBlacklist(realHash):
logger.warn('Block %s is blacklisted.' % (realHash,))
else:
logger.warn('Metadata for block %s is invalid.' % (blockHash,))
comm_inst._core._blacklist.addToDB(blockHash)
blacklist.addToDB(blockHash)
else:
# if block didn't meet expected hash
tempHash = comm_inst._core._crypto.sha3Hash(content) # lazy hack, TODO use var
tempHash = crypto.sha3Hash(content) # lazy hack, TODO use var
try:
tempHash = tempHash.decode()
except AttributeError:
pass
# Punish peer for sharing invalid block (not always malicious, but is bad regardless)
onionrpeers.PeerProfiles(peerUsed, comm_inst._core).addScore(-50)
onionrpeers.PeerProfiles(peerUsed).addScore(-50)
if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
# Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)

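For reference, the hash check above recomputes a SHA3 digest of the downloaded content and compares it against the hash the block was requested by, so a peer cannot substitute arbitrary data. A minimal sketch of the same check using the standard library (hashlib.sha3_256 stands in for the project's sha3Hash wrapper):

import hashlib

def block_matches(block_hash: str, content: bytes) -> bool:
    # Never trust the sender: recompute the digest locally and compare
    return hashlib.sha3_256(content).hexdigest() == block_hash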
View File

@@ -23,7 +23,7 @@ def should_download(comm_inst, block_hash):
if block_hash in blockmetadb.get_block_list(): # Dont download block we have
ret_data = False
else:
if comm_inst._core._blacklist.inBlacklist(block_hash): # Dont download blacklisted block
if comm_inst.blacklist.inBlacklist(block_hash): # Dont download blacklisted block
ret_data = False
if ret_data is False:
# Remove block from communicator queue if it shouldnt be downloaded

View File

@@ -41,81 +41,48 @@ class Core:
# set data dir
self.dataDir = identifyhome.identify_home()
try:
self.usageFile = self.dataDir + 'disk-usage.txt'
self.config = config
self.maxBlockSize = 10000000 # max block size in bytes
self.usageFile = self.dataDir + 'disk-usage.txt'
self.config = config
self.maxBlockSize = 10000000 # max block size in bytes
self.onionrInst = None
self.blockDataLocation = self.dataDir + 'blocks/'
self.blockDataDB = self.blockDataLocation + 'block-data.db'
self.publicApiHostFile = self.dataDir + 'public-host.txt'
self.privateApiHostFile = self.dataDir + 'private-host.txt'
self.addressDB = self.dataDir + 'address.db'
self.hsAddress = ''
self.i2pAddress = config.get('i2p.own_addr', None)
self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
self.bootstrapList = []
self.requirements = onionrvalues.OnionrValues()
self.torPort = torPort
self.dataNonceFile = self.dataDir + 'block-nonces.dat'
self.forwardKeysFile = self.dataDir + 'forward-keys.db'
self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)
self.storage_counter = storagecounter.StorageCounter(self)
# Socket data, defined here because of multithreading constraints with gevent
self.killSockets = False
self.startSocket = {}
self.socketServerConnData = {}
self.socketReasons = {}
self.socketServerResponseData = {}
self.onionrInst = None
self.hsAddress = ''
self.i2pAddress = config.get('i2p.own_addr', None)
self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
self.bootstrapList = []
self.requirements = onionrvalues.OnionrValues()
self.torPort = torPort
self.dataNonceFile = self.dataDir + 'block-nonces.dat'
self.forwardKeysFile = self.dataDir + 'forward-keys.db'
self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)
self.storage_counter = storagecounter.StorageCounter(self)
# Socket data, defined here because of multithreading constraints with gevent
self.killSockets = False
self.startSocket = {}
self.socketServerConnData = {}
self.socketReasons = {}
self.socketServerResponseData = {}
if not os.path.exists(self.dataDir):
os.mkdir(self.dataDir)
if not os.path.exists(self.dataDir + 'blocks/'):
os.mkdir(self.dataDir + 'blocks/')
if not os.path.exists(self.blockDB):
self.createBlockDB()
if not os.path.exists(self.forwardKeysFile):
dbcreator.createForwardKeyDB()
if not os.path.exists(self.peerDB):
self.createPeerDB()
if not os.path.exists(self.addressDB):
self.createAddressDB()
if os.path.exists(self.dataDir + '/hs/hostname'):
with open(self.dataDir + '/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
# Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation):
with open(self.bootstrapFileLocation, 'r') as bootstrap:
bootstrap = bootstrap.read()
for i in bootstrap.split('\n'):
self.bootstrapList.append(i)
else:
logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)
self.use_subprocess = powchoice.use_subprocess(self)
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)
self.serializer = serializeddata.SerializedData(self)
except Exception as error:
logger.error('Failed to initialize core Onionr library.', error=error, terminal=True)
logger.fatal('Cannot recover from error.', terminal=True)
sys.exit(1)
return
def refreshFirstStartVars(self):
'''
Hack to refresh some vars which may not be set on first start
'''
if os.path.exists(self.dataDir + '/hs/hostname'):
with open(self.dataDir + '/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
# Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation):
with open(self.bootstrapFileLocation, 'r') as bootstrap:
bootstrap = bootstrap.read()
for i in bootstrap.split('\n'):
self.bootstrapList.append(i)
else:
logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)
self.use_subprocess = powchoice.use_subprocess(self)
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)
self.serializer = serializeddata.SerializedData(self)
def addPeer(self, peerID, name=''):
'''
Adds a public key to the key database (misleading function name)

View File

@@ -29,6 +29,8 @@ def createAddressDB():
2: Tor v2 (like facebookcorewwwi.onion)
3: Tor v3
'''
if os.path.exists(dbfiles.address_info_db):
raise FileExistsError("Address database already exists")
conn = sqlite3.connect(dbfiles.address_info_db)
c = conn.cursor()
c.execute('''CREATE TABLE adders(
@@ -52,6 +54,8 @@ def createPeerDB():
'''
Generate the peer sqlite3 database and populate it with the peers table.
'''
if os.path.exists(dbfiles.user_id_info_db):
raise FileExistsError("User database already exists")
# generate the peer database
conn = sqlite3.connect(dbfiles.user_id_info_db)
c = conn.cursor()
@@ -146,9 +150,14 @@ def createDaemonDB():
'''
Create the daemon queue database
'''
if os.path.exists(dbfiles.daemon_queue_db):
raise FileExistsError("Daemon queue db already exists")
conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=10)
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE commands (id integer primary key autoincrement, command text, data text, date text, responseID text)''')
conn.commit()
conn.close()
conn.close()
create_funcs = [createAddressDB, createPeerDB, createBlockDB, createBlockDataDB, createForwardKeyDB, createDaemonDB]

View File

@@ -1,6 +1,6 @@
from utils import identifyhome
home = identifyhome.identify_home()
if not home.endswith('/') home += '/'
if not home.endswith('/'): home += '/'
usage_file = home + 'disk-usage.txt'
block_data_location = home + 'blocks/'
@@ -8,4 +8,8 @@ public_API_host_file = home + 'public-host.txt'
private_API_host_file = home + 'private-host.txt'
bootstrap_file_location = 'static-data/bootstrap-nodes.txt'
data_nonce_file = home + 'block-nonces.dat'
forward_keys_file = home + 'forward-keys.db'
forward_keys_file = home + 'forward-keys.db'
tor_hs_address_file = home + 'hs/hostname'
run_check_file = home + '.runcheck'

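Because these constants are computed once at import time from identifyhome, every module that imports filepaths shares one source of truth for data locations; a minimal usage sketch mirroring run_file_exists() above:

import os
import filepaths

if os.path.isfile(filepaths.run_check_file):
    os.remove(filepaths.run_check_file)  # consume the daemon's run-check marker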
View File

@@ -1,9 +1,7 @@
import random, socket
import config, logger
def set_bind_IP(filePath='', core_inst=None):
def set_bind_IP(filePath=''):
'''Set a random localhost IP to a specified file (intended for private or public API localhost IPs)'''
if not core_inst is None:
config = core_inst.config
if config.get('general.random_bind_ip', True):
hostOctets = [str(127), str(random.randint(0x02, 0xFF)), str(random.randint(0x02, 0xFF)), str(random.randint(0x02, 0xFF))]

View File

@@ -57,7 +57,7 @@ def error(data, error = None, timestamp = True, prompt = True, terminal = False,
def fatal(data, error = None, timestamp=True, prompt = True, terminal = False, level = settings.LEVEL_FATAL):
if not error is None:
debug('Error: ' + str(error) + parse_error(), terminal = terminal)
if get_level() <= level:
if settings.get_level() <= level:
log('#', data, colors.bg.red + colors.fg.green + colors.bold, timestamp = timestamp, fd = sys.stderr, prompt = prompt, terminal = terminal)
# returns a formatted error message

View File

@@ -18,13 +18,20 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import filepaths
import config
def get_client_API_server():
config.reload()
retData = ''
getconf = lambda: config.get('client.client.port')
port = getconf()
if port is None:
config.reload()
port = getconf()
try:
with open(filepaths.private_API_host_file, 'r') as host:
hostname = host.read()
except FileNotFoundError:
raise
else:
retData += '%s:%s' % (hostname, core_inst.config.get('client.client.port'))
retData += '%s:%s' % (hostname, port)
return retData

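The returned string is 'host:port', combining the randomized localhost IP written by set_bind_IP with the configured client port, so callers can drop it straight into a URL; an illustrative consumer (the module path is assumed from the relative import in localcommand below):

from onionrutils import getclientapiserver

api_host = getclientapiserver.get_client_API_server()
url = 'http://%s/config/get/%s' % (api_host, 'peers.max_connect')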
View File

@@ -15,41 +15,41 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import urllib, requests, time
import logger
import logger, config
from . import getclientapiserver
hostname = ''
waited = 0
maxWait = 3
config.reload()
def get_hostname():
hostname = ''
waited = 0
while hostname == '':
try:
hostname = getclientapiserver.get_client_API_server(core_inst)
hostname = getclientapiserver.get_client_API_server()
except FileNotFoundError:
time.sleep(1)
waited += 1
if waited == maxWait:
return False
return hostname
hostname = get_hostname()
def local_command(core_inst, command, data='', silent = True, post=False, postData = {}, maxWait=20):
def local_command(command, data='', silent = True, post=False, postData = {}, maxWait=20):
'''
Send a command to the local http API server, securely. Intended for local clients, DO NOT USE for remote peers.
'''
# TODO: URL encode parameters, just as an extra measure. May not be needed, but should be added regardless.
if hostname == False:
global hostname
if hostname == '' or hostname is False:
hostname = get_hostname()
if data != '':
data = '&data=' + urllib.parse.quote_plus(data)
payload = 'http://%s/%s%s' % (hostname, command, data)
try:
if post:
retData = requests.post(payload, data=postData, headers={'token': core_inst.config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
retData = requests.post(payload, data=postData, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
else:
retData = requests.get(payload, headers={'token': core_inst.config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
retData = requests.get(payload, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
except Exception as error:
if not silent:
logger.error('Failed to make local request (command: %s):%s' % (command, error), terminal=True)

View File

@@ -0,0 +1,17 @@
from onionrutils import localcommand
import config
config.reload()
running_detected = False # if we know the api server is running
first_get = True
def config_get(key):
global running_detected, first_get
ret_data = False
if running_detected or first_get:
first_get = False
ret_data = localcommand.local_command('/config/get/' + key)
if ret_data == False:
running_detected = False
ret_data = config.get(key)
else:
running_detected = True
return ret_data

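In use, config_get prefers the running daemon's live view of a key and only falls back to the on-disk config when the local API does not answer; a hedged example (key name as used elsewhere in this commit):

# Returns the daemon's value if the local API responds, else the on-disk value
max_peers = config_get('peers.max_connect')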
View File

@@ -0,0 +1,13 @@
import os
from . import identifyhome
import dbcreator, filepaths
home = identifyhome.identify_home()
if not os.path.exists(home):
os.mkdir(home)
os.mkdir(filepaths.block_data_location)
for db in dbcreator.create_funcs:
try:
db()
except FileExistsError:
pass

View File

@@ -0,0 +1,7 @@
import filepaths
files = [filepaths.tor_hs_address_file]
def get_transports():
'''Read each transport address file fresh from disk and return the list of addresses'''
transports = []
for file in files:
with open(file, 'r') as transport_file:
transports.append(transport_file.read().strip())
return transports

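Call sites elsewhere in this commit consume the module like so (the first entry is the Tor hidden service address, since tor_hs_address_file is first in the files list):

from utils import gettransports

tor_address = gettransports.get_transports()[0]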
View File

@@ -0,0 +1,10 @@
import os
def read_static(file, ret_bin=False):
static_file = os.path.dirname(os.path.realpath(__file__)) + '/../static-data/' + file
if ret_bin:
mode = 'rb'
else:
mode = 'r'
with open(static_file, mode) as f:
return f.read()
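bootstrappeers.py above loads the comma-separated bootstrap node list through this helper; a matching usage sketch:

from utils import readstatic

bootstrap_peers = readstatic.read_static('bootstrap-nodes.txt').split(',')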