progress removing onionr.py
parent a6fec9eefb
commit aea32bd1bc
@@ -54,6 +54,7 @@ from utils import createdirs
createdirs.create_dirs()

from onionrcommands import parser
import onionrevents as events

setup.setup_config()
setup.setup_default_plugins()
@@ -23,7 +23,9 @@ from gevent.pywsgi import WSGIServer
from onionrutils import epoch
import httpapi, filepaths, logger
from . import register_private_blueprints
+from etc import waitforsetvar
import serializeddata, config
+from .. import public
class PrivateAPI:
    '''
    Client HTTP api
@@ -48,8 +50,6 @@ class PrivateAPI:
        self.clientToken = config.get('client.webpassword')
        self.timeBypassToken = base64.b16encode(os.urandom(32)).decode()

-        self.publicAPI = None # gets set when the thread calls our setter... bad hack but kinda necessary with flask
-        #threading.Thread(target=PublicAPI, args=(self,)).start()
        self.host = httpapi.apiutils.setbindip.set_bind_IP(filepaths.private_API_host_file)
        logger.info('Running api on %s:%s' % (self.host, self.bindPort))
        self.httpServer = ''
@@ -58,8 +58,12 @@ class PrivateAPI:
        self.get_block_data = httpapi.apiutils.GetBlockData(self)
        register_private_blueprints.register_private_blueprints(self, app)
        httpapi.load_plugin_blueprints(app)
        self.app = app

-        self.httpServer = WSGIServer((self.host, bindPort), app, log=None, handler_class=httpapi.fdsafehandler.FDSafeHandler)
    def start(self):
+        waitforsetvar.wait_for_set_var(self, "_too_many")
+        self.publicAPI = self._too_many.get(public.PublicAPI)
+        self.httpServer = WSGIServer((self.host, self.bindPort), self.app, log=None, handler_class=httpapi.fdsafehandler.FDSafeHandler)
        self.httpServer.serve_forever()

    def setPublicAPIInstance(self, inst):
@@ -18,12 +18,21 @@
    along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import time
import threading
import flask
from gevent.pywsgi import WSGIServer
from httpapi import apiutils, security, fdsafehandler, miscpublicapi
import logger, config, filepaths
from utils import gettransports
-from etc import onionrvalues
+from etc import onionrvalues, waitforsetvar

+def _get_tor_adder(pub_api):
+    transports = []
+    while len(transports) == 0:
+        transports = gettransports.get()
+        time.sleep(0.3)
+    pub_api.torAdder = transports[0]

class PublicAPI:
    '''
    The new client api server, isolated from the public api
@@ -34,11 +43,10 @@ class PublicAPI:
        self.i2pEnabled = config.get('i2p.host', False)
        self.hideBlocks = [] # Blocks to be denied sharing
        self.host = apiutils.setbindip.set_bind_IP(filepaths.public_API_host_file)
-        transports = []
-        while len(transports) == 0:
-            transports = gettransports.get()
-            time.sleep(0.3)
-        self.torAdder = transports[0]

+        threading.Thread(target=_get_tor_adder, args=[self], daemon=True).start()

+        self.torAdder = ""
        self.bindPort = config.get('client.public.port')
        self.lastRequest = 0
        self.hitCount = 0 # total rec requests to public api since server started
@@ -46,8 +54,11 @@
        self.API_VERSION = onionrvalues.API_VERSION
        logger.info('Running public api on %s:%s' % (self.host, self.bindPort))

        app.register_blueprint(security.public.PublicAPISecurity(self).public_api_security_bp)
        app.register_blueprint(miscpublicapi.endpoints.PublicEndpoints(self).public_endpoints_bp)
-        self.httpServer = WSGIServer((self.host, self.bindPort), app, log=None, handler_class=fdsafehandler.FDSafeHandler)
        self.app = app

    def start(self):
+        waitforsetvar.wait_for_set_var(self, "_too_many")
+        self.httpServer = WSGIServer((self.host, self.bindPort), self.app, log=None, handler_class=fdsafehandler.FDSafeHandler)
        self.httpServer.serve_forever()
@@ -32,22 +32,23 @@ from etc import humanreadabletime
import onionrservices, filepaths, storagecounter
from coredb import daemonqueue, dbfiles
from utils import gettransports
+from netcontroller import NetController
OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers

config.reload()
class OnionrCommunicatorDaemon:
-    def __init__(self, proxyPort, developmentMode=config.get('general.dev_mode', False)):
+    def __init__(self, shared_state, developmentMode=config.get('general.dev_mode', False)):
        # configure logger and stuff
        self.config = config
        self.storage_counter = storagecounter.StorageCounter()
-        self.proxyPort = proxyPort
        self.isOnline = True # Assume we're connected to the internet
+        self.shared_state = shared_state

        # list of timer instances
        self.timers = []

        # initialize core with Tor socks port being 3rd argument
-        self.proxyPort = proxyPort
+        self.proxyPort = shared_state.get(NetController).socksPort

        self.blocksToUpload = []
@@ -158,6 +159,8 @@ class OnionrCommunicatorDaemon:
        cleanupTimer.count = (cleanupTimer.frequency - 60)
        blockCleanupTimer.count = (blockCleanupTimer.frequency - 5)

+        shared_state.add(self)

        # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
        try:
            while not self.shutdown:
@@ -244,8 +247,8 @@ class OnionrCommunicatorDaemon:

        self.decrementThreadCount('runCheck')

-def startCommunicator(proxyPort):
-    OnionrCommunicatorDaemon(proxyPort)
+def startCommunicator(shared_state):
+    OnionrCommunicatorDaemon(shared_state)

def run_file_exists(daemon):
    if os.path.isfile(filepaths.run_check_file):
@@ -22,6 +22,7 @@ import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests, bytesconverter
from utils import gettransports
+from netcontroller import NetController
from communicator import onlinepeers
from coredb import keydb
def announce_node(daemon):
@@ -77,7 +78,7 @@ def announce_node(daemon):
        daemon.announceCache[peer] = data['random']
        if not announceFail:
            logger.info('Announcing node to ' + url)
-            if basicrequests.do_post_request(daemon.onionrInst, url, data) == 'Success':
+            if basicrequests.do_post_request(url, data, port=daemon.shared_state.get(NetController)) == 'Success':
                logger.info('Successfully introduced node to ' + peer, terminal=True)
                retData = True
                keydb.transportinfo.set_address_info(peer, 'introduced', 1)
@@ -26,7 +26,7 @@ def handle_daemon_commands(comm_inst):
    cmd = daemonqueue.daemon_queue()
    response = ''
    if cmd is not False:
-        events.event('daemon_command', onionr = comm_inst.onionrInst, data = {'cmd' : cmd})
+        events.event('daemon_command', data = {'cmd' : cmd})
        if cmd[0] == 'shutdown':
            comm_inst.shutdown = True
        elif cmd[0] == 'announceNode':
@@ -45,7 +45,7 @@ def upload_blocks_from_communicator(comm_inst):
                data = {'block': block.Block(bl).getRaw()}
                proxyType = proxypicker.pick_proxy(peer)
                logger.info("Uploading block to " + peer, terminal=True)
-                if not basicrequests.do_post_request(comm_inst.onionrInst, url, data=data, proxyType=proxyType) == False:
+                if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
                    localcommand.local_command('waitforshare/' + bl, post=True)
                    finishedUploads.append(bl)
    for x in finishedUploads:
onionr/etc/waitforsetvar.py (new file, 4 lines)
@@ -0,0 +1,4 @@
+def wait_for_set_var(obj, attribute):
+    while True:
+        if hasattr(obj, attribute):
+            break
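
The new wait_for_set_var helper simply spins until another thread attaches the named attribute to an object; note the loop has no sleep, so the waiting thread busy-polls. A minimal usage sketch, assuming the onionr source directory is on sys.path (the Holder class and the worker thread below are illustrative only):

    import threading
    import time

    from etc.waitforsetvar import wait_for_set_var

    class Holder:
        pass  # "_too_many" gets attached later by another thread

    holder = Holder()

    def attach_later():
        time.sleep(1)                # simulate slow startup elsewhere
        holder._too_many = "shared"  # attribute name matches what the waiter polls for

    threading.Thread(target=attach_later, daemon=True).start()
    wait_for_set_var(holder, "_too_many")  # returns once the attribute exists
    print(holder._too_many)
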
@@ -20,6 +20,7 @@
from flask import Response, Blueprint, request, send_from_directory, abort
from httpapi import apiutils
import onionrcrypto, config
+from netcontroller import NetController
pub_key = onionrcrypto.pub_key
class PrivateEndpoints:
    def __init__(self, client_api):
@@ -111,3 +112,7 @@ class PrivateEndpoints:
        @private_endpoints_bp.route('/getHumanReadable/<name>')
        def getHumanReadable(name):
            return Response(mnemonickeys.get_human_readable_ID(name))

+        @private_endpoints_bp.route('/gettorsocks')
+        def get_tor_socks():
+            return Response(client_api._too_many.get(NetController).socksPort)
@@ -51,8 +51,13 @@ def daemon():
        logger.debug('Runcheck file found on daemon start, deleting in advance.')
        os.remove(filepaths.run_check_file)

-    Thread(target=apiservers.ClientAPI, daemon=True).start()
-    Thread(target=apiservers.PublicAPI, daemon=True).start()
+    # Create shared object

+    shared_state = toomanyobjs.TooMany()

+    Thread(target=shared_state.get(apiservers.ClientAPI).start, daemon=True).start()
+    Thread(target=shared_state.get(apiservers.PublicAPI).start, daemon=True).start()
+    shared_state.share_object() # share the parent object to the threads

    apiHost = ''
    while apiHost == '':
@@ -62,7 +67,6 @@ def daemon():
        except FileNotFoundError:
            pass
        time.sleep(0.5)
-    #onionr.Onionr.setupConfig('data/', self = o_inst)

    logger.raw('', terminal=True)
    # print nice header thing :)
@@ -73,7 +77,10 @@ def daemon():

    if onionrvalues.DEVELOPMENT_MODE:
        logger.warn('Development mode enabled', timestamp = False, terminal=True)

    net = NetController(config.get('client.public.port', 59497), apiServerIP=apiHost)
+    shared_state.add(net)

    logger.info('Tor is starting...', terminal=True)
    if not net.startTor():
        localcommand.local_command('shutdown')
@@ -88,9 +95,9 @@ def daemon():
            time.sleep(1)
    except KeyboardInterrupt:
        _proper_shutdown()

    events.event('init', threaded = False)
    events.event('daemon_start')
-    communicator.startCommunicator(str(net.socksPort))
+    communicator.startCommunicator(shared_state)

    localcommand.local_command('shutdown')
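
daemonlaunch now builds everything around a single toomanyobjs.TooMany container instead of handing an Onionr instance to each component. The sketch below shows the pattern as it is used in this diff; the semantics assumed here (get() lazily constructs and caches one instance per class, add() registers an existing instance, and share_object() attaches the container to managed objects as a _too_many attribute) are inferred from the calls above rather than taken from the library's documentation, and the classes are stand-ins:

    import time
    from threading import Thread

    import toomanyobjs

    class NetController:
        """Stand-in for netcontroller.NetController; socksPort is a placeholder."""
        socksPort = 9050

    class ClientAPI:
        """Stand-in for apiservers.ClientAPI."""
        def start(self):
            # Wait until share_object() has attached the container,
            # the same idea as the new etc/waitforsetvar helper.
            while not hasattr(self, "_too_many"):
                time.sleep(0.1)
            print("socks port:", self._too_many.get(NetController).socksPort)

    shared_state = toomanyobjs.TooMany()   # the shared object container
    shared_state.add(NetController())      # register a pre-built instance
    Thread(target=shared_state.get(ClientAPI).start, daemon=True).start()
    shared_state.share_object()            # expose the container as ._too_many
    time.sleep(1)                          # give the daemon thread time to print
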
@@ -36,7 +36,7 @@ def add_peer(o_inst):
    except AssertionError:
        logger.error('Failed to add key', terminal=True)

-def add_address(o_inst):
+def add_address():
    try:
        newAddress = sys.argv[2]
        newAddress = newAddress.replace('http:', '').replace('/', '')
@@ -1,12 +1,17 @@
-from .. import onionrstatistics, version, daemonlaunch
+from .. import onionrstatistics, version, daemonlaunch, keyadders
import onionrexceptions
+import onionrevents as events
def get_arguments():
    '''This is a function because we need to be able to dynamically modify them with plugins'''
    args = {
        ('details', 'info'): onionrstatistics.show_details,
        ('version'): version.version,
-        ('start', 'daemon'): daemonlaunch.start
+        ('start', 'daemon'): daemonlaunch.start,
+        ('stop', 'kill'): daemonlaunch.kill_daemon,
+        ('add-address', 'addaddress', 'addadder'): keyadders.add_address
    }
+    args = events.event('init', data=args, threaded=False)
+    print(args)
    return args

def get_help():
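
get_arguments() keys the command table with tuples of aliases (and, in the case of 'version', a bare string), then gives plugins a chance to rewrite it through the synchronous 'init' event. A hypothetical dispatcher over such a table could look like the following; run_command and its lookup logic are illustrative, not the project's actual parser:

    import sys

    def run_command(args: dict):
        """Match sys.argv[1] against tuple-of-aliases (or plain string) keys."""
        try:
            cmd = sys.argv[1]
        except IndexError:
            return
        for aliases, handler in args.items():
            # most keys are tuples of aliases, but ('version') is just a str
            if cmd == aliases or (isinstance(aliases, tuple) and cmd in aliases):
                handler()
                return
        print('No such command:', cmd)
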
@@ -21,35 +21,35 @@
import config, logger, onionrplugins as plugins, onionrpluginapi as pluginapi
from threading import Thread

-def get_pluginapi(onionr, data):
-    return pluginapi.SharedAPI(onionr, data)
+def get_pluginapi(data):
+    return pluginapi.SharedAPI(data)

-def __event_caller(event_name, data = {}, onionr = None):
+def __event_caller(event_name, data = {}):
    '''
    DO NOT call this function, this is for threading code only.
    Instead, call onionrevents.event
    '''
    for plugin in plugins.get_enabled_plugins():
        try:
-            call(plugins.get_plugin(plugin), event_name, data, get_pluginapi(onionr, data))
+            return call(plugins.get_plugin(plugin), event_name, data, get_pluginapi(data))
        except ModuleNotFoundError as e:
            logger.warn('Disabling nonexistant plugin "%s"...' % plugin, terminal=True)
-            plugins.disable(plugin, onionr, stop_event = False)
+            plugins.disable(plugin, stop_event = False)
        except Exception as e:
            logger.warn('Event "%s" failed for plugin "%s".' % (event_name, plugin), terminal=True)
            logger.debug(str(e), terminal=True)

-def event(event_name, data = {}, onionr = None, threaded = True):
+def event(event_name, data = {}, threaded = True):
    '''
    Calls an event on all plugins (if defined)
    '''

    if threaded:
-        thread = Thread(target = __event_caller, args = (event_name, data, onionr))
+        thread = Thread(target = __event_caller, args = (event_name, data))
        thread.start()
        return thread
    else:
-        __event_caller(event_name, data, onionr)
+        return __event_caller(event_name, data)

def call(plugin, event_name, data = None, pluginapi = None):
    '''
@@ -61,7 +61,7 @@ def call(plugin, event_name, data = None, pluginapi = None):
            attribute = 'on_' + str(event_name).lower()

            if hasattr(plugin, attribute):
-                getattr(plugin, attribute)(pluginapi, data)
+                return getattr(plugin, attribute)(pluginapi, data)

            return True
        except Exception as e:
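
With these changes an event hook's return value now flows back to the caller: call() returns whatever on_<event> returns, __event_caller passes it along, and a non-threaded events.event(...) hands it to the code that fired the event. That is what lets arguments.py feed its command table through the 'init' event, and what the flow plugin's on_init now relies on by returning data. A hypothetical plugin hook in that style (the plugin and the extra command are made up):

    # hypothetical plugin module under static-data/default-plugins/example/main.py
    def on_init(api, data=None):
        """Receives the command table as `data` when the 'init' event fires.

        Returning the (possibly modified) dict is what
        `args = events.event('init', data=args, threaded=False)` depends on.
        """
        if isinstance(data, dict):
            data[('example', 'ex')] = lambda: print('example command ran')
        return data
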
@@ -23,7 +23,7 @@ from onionrutils import epoch
from . import scoresortedpeerlist, peerprofiles
import onionrblacklist, config
from coredb import keydb
-def peer_cleanup(onionr_inst):
+def peer_cleanup():
    '''Removes peers who have been offline too long or score too low'''
    logger.info('Cleaning peers...')
    blacklist = onionrblacklist.OnionrBlackList()
@@ -20,13 +20,14 @@
import requests, streamedrequests
import logger, onionrexceptions
from etc import onionrvalues
-def do_post_request(onionr_inst, url, data={}, port=0, proxyType='tor', max_size=10000):
+from . import localcommand
+def do_post_request(url, data={}, port=0, proxyType='tor', max_size=10000):
    '''
    Do a POST request through a local tor or i2p instance
    '''
    if proxyType == 'tor':
        if port == 0:
-            port = onionr_inst.torPort
+            port = localcommand.local_command('/gettorsocks')
        proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)}
    elif proxyType == 'i2p':
        proxies = {'http': 'http://127.0.0.1:4444'}
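
do_post_request routes traffic through the local Tor SOCKS proxy by building a requests proxies mapping, and when no port is passed it now asks the client API's new /gettorsocks endpoint for the port instead of reading it off an Onionr instance. A standalone illustration of that proxy setup follows; the port and URL are placeholders, and socks4a:// proxies require requests to be installed with SOCKS support (requests[socks] / PySocks):

    import requests

    def post_via_tor(url: str, data: dict, socks_port: int = 9050, timeout: int = 30):
        """POST through a local Tor SOCKS proxy, mirroring the proxies dict above."""
        proxies = {
            'http': 'socks4a://127.0.0.1:' + str(socks_port),
            'https': 'socks4a://127.0.0.1:' + str(socks_port),
        }
        return requests.post(url, data=data, proxies=proxies, timeout=timeout)

    # example call with a placeholder onion address:
    # post_via_tor('http://exampleonionaddress.onion/announce', {'node': '...'})
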
@@ -20,8 +20,9 @@

import json
from coredb import blockmetadb
+import communicator
class SerializedData:
-    def __init__(self, o_inst):
+    def __init__(self):
        '''
        Serialized data is in JSON format:
            {
@@ -30,13 +31,14 @@ class SerializedData:
                etc
            }
        '''
-        self.o_inst = o_inst
+        self._too_many = None

    def getStats(self):
        '''Return statistics about our node'''
        stats = {}
-        stats['uptime'] = self.o_inst.communicatorInst.getUptime()
-        stats['connectedNodes'] = '\n'.join(self.o_inst.communicatorInst.onlinePeers)
+        comm_inst = self._too_many.get(communicator.OnionrCommunicatorDaemon)
+        stats['uptime'] = comm_inst.getUptime()
+        stats['connectedNodes'] = '\n'.join(comm_inst.onlinePeers)
        stats['blockCount'] = len(blockmetadb.get_block_list())
-        stats['blockQueueCount'] = len(self.o_inst.communicatorInst.blockQueue)
+        stats['blockQueueCount'] = len(comm_inst.blockQueue)
        return json.dumps(stats)
@@ -107,9 +107,7 @@ def on_init(api, data = None):
    global pluginapi
    pluginapi = api
    flow = OnionrFlow()
    api.commands.register('flow', flow.start)
    api.commands.register_help('flow', 'Open the flow messaging interface')
-    return
+    return data

def on_processblocks(api, data=None):
    b_hash = reconstructhash.deconstruct_hash(data['block'].hash) # Get the 0-truncated block hash
onionr/utils/definewait.py (new file, 9 lines)
@@ -0,0 +1,9 @@
+import time
+def define_wait(dictionary, key, delay: int = 0):
+    while True:
+        try:
+            return dictionary[key]
+        except KeyError:
+            pass
+        if delay > 0:
+            time.sleep(delay)
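
define_wait is the dictionary counterpart to wait_for_set_var: it blocks until a key appears in a dict that some other thread is expected to fill in, optionally sleeping between polls. A small self-contained sketch, with an illustrative producer thread and key name (the import path assumes the onionr source directory is on sys.path):

    import threading
    import time

    from utils.definewait import define_wait

    shared = {}

    def producer():
        time.sleep(0.5)             # simulate slow initialization elsewhere
        shared['tor_port'] = 9050   # value shows up later

    threading.Thread(target=producer, daemon=True).start()
    # Checks the dict, sleeps 1 second between misses, returns once the key exists.
    print(define_wait(shared, 'tor_port', delay=1))
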
@@ -1,7 +1,10 @@
import sys, os
from . import readstatic
import logger
+from etc import onionrvalues
def header(message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
+    if onionrvalues.DEVELOPMENT_MODE:
+        return
    header_path = readstatic.get_static_dir() + 'header.txt'
    if os.path.exists(header_path) and logger.settings.get_level() <= logger.settings.LEVEL_INFO:
        with open(header_path, 'rb') as file:
@@ -9,4 +9,4 @@ deadsimplekv==0.1.1
unpaddedbase32==0.1.0
streamedrequests==1.0.0
jinja2==2.10.1
-toomanyobjs==0.0.0
+toomanyobjs==1.0.0
@@ -173,8 +173,8 @@ stem==1.7.1 \
    --hash=sha256:c9eaf3116cb60c15995cbd3dec3a5cbc50e9bb6e062c4d6d42201e566f498ca2
streamedrequests==1.0.0 \
    --hash=sha256:1d9d07394804a6e1fd66bde74a804e71cab98e6920053865574a459f1cf7d3b7
-toomanyobjs==0.0.0 \
-    --hash=sha256:997e9399a33d4884eb535b6cc2c02705ed9ae1bcff56afbbf3ea81fc4ac9ab95
+toomanyobjs==1.0.0 \
+    --hash=sha256:040390188063dd00e5d903fd82a08850a175f9a384e09880d50acffb1e60ca70
unpaddedbase32==0.1.0 \
    --hash=sha256:5e4143fcaf77c9c6b4f60d18301c7570f0dac561dcf9b9aed8b5ba6ead7f218c
urllib3==1.24.2 \