code cleanup, defunct code removal

Kevin Froman 2019-02-11 23:30:56 -06:00
parent 944c76d2e9
commit 7c57829ec3
9 changed files with 106 additions and 115 deletions

View File

@@ -31,8 +31,6 @@ class FDSafeHandler(WSGIHandler):
def handle(self):
timeout = Timeout(60, exception=Exception)
timeout.start()
#timeout = gevent.Timeout.start_new(3)
try:
WSGIHandler.handle(self)
except Timeout as ex:
@@ -76,28 +74,35 @@ class PublicAPI:
@app.before_request
def validateRequest():
'''Validate request has the correct hostname'''
# If high security level, deny requests to public
# If high security level, deny requests to public (HS should be disabled anyway for Tor, but might not be for I2P)
if config.get('general.security_level', default=0) > 0:
abort(403)
if type(self.torAdder) is None and type(self.i2pAdder) is None:
# abort if our hs addresses are not known
abort(403)
if request.host not in (self.i2pAdder, self.torAdder):
# Disallow connection if wrong HTTP hostname, in order to prevent DNS rebinding attacks
abort(403)
@app.after_request
def sendHeaders(resp):
'''Send api, access control headers'''
resp.headers['Date'] = 'Thu, 1 Jan 1970 00:00:00 GMT' # Clock info is probably useful to attackers. Set to unix epoch.
resp.headers['Date'] = 'Thu, 1 Jan 1970 00:00:00 GMT' # Clock info is probably useful to attackers. Set to unix epoch, since we can't fully remove the header.
# CSP to prevent XSS. Mainly for client side attacks (if hostname protection could somehow be bypassed)
resp.headers["Content-Security-Policy"] = "default-src 'none'; script-src 'none'; object-src 'none'; style-src data: 'unsafe-inline'; img-src data:; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src 'none'"
# Prevent click jacking
resp.headers['X-Frame-Options'] = 'deny'
# No sniff is possibly not needed
resp.headers['X-Content-Type-Options'] = "nosniff"
# Network API version
resp.headers['X-API'] = onionr.API_VERSION
# Close connections to limit FD use
resp.headers['Connection'] = "close"
return resp
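The hostname check and the hardened headers above form one small pattern: refuse any request whose Host header is not one of our own hidden service addresses (which is what defeats DNS rebinding), then scrub identifying metadata from every response. A minimal standalone sketch of the same two hooks, using an illustrative app and a hypothetical .onion hostname rather than Onionr's real values:

from flask import Flask, Response, abort, request

app = Flask(__name__)
allowed_hosts = {'exampleexampleexample.onion'}  # hypothetical hidden service hostname

@app.before_request
def reject_wrong_host():
    # A DNS-rebinding page still sends the attacker's hostname in the Host
    # header, so refusing hostnames we don't serve blocks the attack.
    if request.host not in allowed_hosts:
        abort(403)

@app.after_request
def harden(resp):
    resp.headers['Date'] = 'Thu, 1 Jan 1970 00:00:00 GMT'  # hide clock information
    resp.headers['X-Frame-Options'] = 'deny'               # prevent clickjacking
    resp.headers['X-Content-Type-Options'] = 'nosniff'
    resp.headers['Connection'] = 'close'                   # limit file descriptor use
    return resp

@app.route('/')
def index():
    return Response('ok')

if __name__ == '__main__':
    client = app.test_client()
    assert client.get('/', base_url='http://attacker.example').status_code == 403
    assert client.get('/', base_url='http://exampleexampleexample.onion').status_code == 200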
@app.route('/')
def banner():
# Display a bit of information to people who visit a node address in their browser
try:
with open('static-data/index.html', 'r') as html:
resp = Response(html.read(), mimetype='text/html')
@@ -107,21 +112,28 @@ class PublicAPI:
@app.route('/getblocklist')
def getBlockList():
# Provide a list of our blocks, with a date offset
dateAdjust = request.args.get('date')
bList = clientAPI._core.getBlockList(dateRec=dateAdjust)
for b in self.hideBlocks:
if b in bList:
# Don't share blocks we created if they haven't been *uploaded* yet, makes it harder to find who created a block
bList.remove(b)
return Response('\n'.join(bList))
@app.route('/getdata/<name>')
def getBlockData(name):
# Share data for a block if we have it
resp = ''
data = name
if clientAPI._utils.validateHash(data):
if data not in self.hideBlocks:
if data in clientAPI._core.getBlockList():
block = clientAPI.getBlockData(data, raw=True).encode()
block = clientAPI.getBlockData(data, raw=True)
try:
block = block.encode()
except AttributeError:
abort(404)
block = clientAPI._core._utils.strToBytes(block)
resp = block
#resp = base64.b64encode(block).decode()
@@ -132,18 +144,16 @@ class PublicAPI:
@app.route('/www/<path:path>')
def wwwPublic(path):
# A way to share files directly over your .onion
if not config.get("www.public.run", True):
abort(403)
return send_from_directory(config.get('www.public.path', 'static-data/www/public/'), path)
@app.route('/ping')
def ping():
# Endpoint to test if nodes are up
return Response("pong!")
@app.route('/getdbhash')
def getDBHash():
return Response(clientAPI._utils.getBlockDBHash())
@app.route('/pex')
def peerExchange():
response = ','.join(clientAPI._core.listAdders(recent=3600))
@@ -191,6 +201,9 @@ class PublicAPI:
@app.route('/upload', methods=['post'])
def upload():
'''Accept file uploads. In the future this will be done more often than on creation
to speed up block sync
'''
resp = 'failure'
try:
data = request.form['block']
@@ -212,6 +225,7 @@ class PublicAPI:
resp = Response(resp)
return resp
# Set instances, then startup our public api server
clientAPI.setPublicAPIInstance(self)
while self.torAdder == '':
clientAPI._core.refreshFirstStartVars()
@@ -239,7 +253,6 @@ class API:
onionr.Onionr.setupConfig('data/', self = self)
self.debug = debug
self._privateDelayTime = 3
self._core = onionrInst.onionrCore
self.startTime = self._core._utils.getEpoch()
self._crypto = onionrcrypto.OnionrCrypto(self._core)
@@ -248,7 +261,7 @@ class API:
bindPort = int(config.get('client.client.port', 59496))
self.bindPort = bindPort
# Be extremely mindful of this
# Be extremely mindful of this. These are endpoints available without a password
self.whitelistEndpoints = ('site', 'www', 'onionrhome', 'board', 'boardContent', 'sharedContent', 'mail', 'mailindex')
self.clientToken = config.get('client.webpassword')
@@ -260,13 +273,14 @@ class API:
logger.info('Running api on %s:%s' % (self.host, self.bindPort))
self.httpServer = ''
self.pluginResponses = {}
self.pluginResponses = {} # Responses for plugin endpoints
self.queueResponse = {}
onionrInst.setClientAPIInst(self)
@app.before_request
def validateRequest():
'''Validate request has set password and is the correct hostname'''
# For the purpose of preventing DNS rebinding attacks
if request.host != '%s:%s' % (self.host, self.bindPort):
abort(403)
if request.endpoint in self.whitelistEndpoints:
@@ -279,13 +293,13 @@ class API:
@app.after_request
def afterReq(resp):
# Security headers
if request.endpoint == 'site':
resp.headers['Content-Security-Policy'] = "default-src 'none'; style-src data: 'unsafe-inline'; img-src data:"
else:
resp.headers['Content-Security-Policy'] = "default-src 'none'; script-src 'self'; object-src 'none'; style-src 'self'; img-src 'self'; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src 'self'"
resp.headers['X-Frame-Options'] = 'deny'
resp.headers['X-Content-Type-Options'] = "nosniff"
resp.headers['X-API'] = onionr.API_VERSION
resp.headers['Server'] = ''
resp.headers['Date'] = 'Thu, 1 Jan 1970 00:00:00 GMT' # Clock info is probably useful to attackers. Set to unix epoch.
resp.headers['Connection'] = "close"
@@ -317,11 +331,13 @@ class API:
@app.route('/queueResponseAdd/<name>', methods=['post'])
def queueResponseAdd(name):
# Responses from the daemon. TODO: change to direct var access instead of http endpoint
self.queueResponse[name] = request.form['data']
return Response('success')
@app.route('/queueResponse/<name>')
def queueResponse(name):
# Fetch a daemon queue response
resp = 'failure'
try:
resp = self.queueResponse[name]
@@ -333,10 +349,12 @@ class API:
@app.route('/ping')
def ping():
# Used to check if client api is working
return Response("pong!")
@app.route('/', endpoint='onionrhome')
def hello():
# ui home
return send_from_directory('static-data/www/private/', 'index.html')
@app.route('/getblocksbytype/<name>')
@@ -396,6 +414,7 @@ class API:
@app.route('/waitforshare/<name>', methods=['post'])
def waitforshare(name):
'''Used to prevent the **public** api from sharing blocks we just created'''
assert name.isalnum()
if name in self.publicAPI.hideBlocks:
self.publicAPI.hideBlocks.remove(name)
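waitforshare is the other half of the hideBlocks mechanism seen in getblocklist and getdata above: a block we just created stays hidden from the public API until the daemon confirms it has been uploaded, so peers cannot guess the creator from who served it first. A rough sketch of that lifecycle with illustrative names, not Onionr's actual classes:

class BlockSharePrivacy:
    def __init__(self):
        self.hidden = set()          # hashes of blocks we created but have not uploaded yet

    def on_block_created(self, block_hash):
        self.hidden.add(block_hash)  # keep it out of block list and block data responses

    def public_block_list(self, all_blocks):
        return [b for b in all_blocks if b not in self.hidden]

    def on_uploaded(self, block_hash):
        # Equivalent to hitting /waitforshare: once the block exists on another
        # node, serving it ourselves no longer singles us out as its creator.
        self.hidden.discard(block_hash)

privacy = BlockSharePrivacy()
privacy.on_block_created('abc123')
assert privacy.public_block_list(['abc123', 'def456']) == ['def456']
privacy.on_uploaded('abc123')
assert privacy.public_block_list(['abc123', 'def456']) == ['abc123', 'def456']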
@@ -421,6 +440,7 @@ class API:
@app.route('/getstats')
def getStats():
# returns node stats
#return Response("disabled")
while True:
try:
@@ -475,6 +495,7 @@ class API:
@app.route('/apipoints/<path:subpath>', methods=['POST', 'GET'])
def pluginEndpoints(subpath=''):
'''Send data to plugins'''
# TODO have a variable for the plugin to set data to that we can use for the response
pluginResponseCode = str(uuid.uuid4())
resp = 'success'
@@ -512,7 +533,7 @@ class API:
def validateToken(self, token):
'''
Validate that the client token matches the given token
Validate that the client token matches the given token. Used to prevent CSRF and data exfiltration
'''
if len(self.clientToken) == 0:
logger.error("client password needs to be set")
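The docstring notes the token check exists to stop CSRF and data exfiltration. As a general illustration (not necessarily how Onionr compares tokens), a web-password check like this is normally done with a constant-time comparison so response timing cannot leak the secret:

import hmac

def validate_token(stored_token: str, supplied_token: str) -> bool:
    # Refuse everything if no password has been configured yet.
    if len(stored_token) == 0:
        return False
    # hmac.compare_digest runs in constant time, so an attacker cannot recover
    # the token byte by byte by measuring response time differences.
    return hmac.compare_digest(stored_token, supplied_token)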

View File

@@ -189,42 +189,38 @@ class OnionrCommunicatorDaemon:
break
else:
continue
newDBHash = self.peerAction(peer, 'getdbhash') # get their db hash
if newDBHash == False or not self._core._utils.validateHash(newDBHash):
continue # if request failed, restart loop (peer is added to offline peers automatically)
triedPeers.append(peer)
if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
self._core.setAddressInfo(peer, 'DBHash', newDBHash)
# Get the last time we looked up a peer's stamp to only fetch blocks since then.
# Saved in memory only for privacy reasons
try:
lastLookupTime = self.dbTimestamps[peer]
except KeyError:
lastLookupTime = 0
else:
listLookupCommand += '?date=%s' % (lastLookupTime,)
try:
newBlocks = self.peerAction(peer, listLookupCommand) # get list of new block hashes
except Exception as error:
logger.warn('Could not get new blocks from %s.' % peer, error = error)
newBlocks = False
else:
self.dbTimestamps[peer] = self._core._utils.getRoundedEpoch(roundS=60)
if newBlocks != False:
# if request was a success
for i in newBlocks.split('\n'):
if self._core._utils.validateHash(i):
# if newline separated string is valid hash
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
if i not in self.blockQueue:
if onionrproofs.hashMeetsDifficulty(i) and not self._core._blacklist.inBlacklist(i):
if len(self.blockQueue) <= 1000000:
self.blockQueue[i] = [peer] # add blocks to download queue
else:
if peer not in self.blockQueue[i]:
if len(self.blockQueue[i]) < 10:
self.blockQueue[i].append(peer)
# Get the last time we looked up a peer's stamp to only fetch blocks since then.
# Saved in memory only for privacy reasons
try:
lastLookupTime = self.dbTimestamps[peer]
except KeyError:
lastLookupTime = 0
else:
listLookupCommand += '?date=%s' % (lastLookupTime,)
try:
newBlocks = self.peerAction(peer, listLookupCommand) # get list of new block hashes
except Exception as error:
logger.warn('Could not get new blocks from %s.' % peer, error = error)
newBlocks = False
else:
self.dbTimestamps[peer] = self._core._utils.getRoundedEpoch(roundS=60)
if newBlocks != False:
# if request was a success
for i in newBlocks.split('\n'):
if self._core._utils.validateHash(i):
# if newline separated string is valid hash
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
if i not in self.blockQueue:
if onionrproofs.hashMeetsDifficulty(i) and not self._core._blacklist.inBlacklist(i):
if len(self.blockQueue) <= 1000000:
self.blockQueue[i] = [peer] # add blocks to download queue
else:
if peer not in self.blockQueue[i]:
if len(self.blockQueue[i]) < 10:
self.blockQueue[i].append(peer)
self.decrementThreadCount('lookupBlocks')
return
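The nested conditions above maintain blockQueue as a plain dict mapping a block hash to the short list of peers known to advertise it, capped at 10 peers per hash and roughly one million queued hashes. A condensed sketch of the same bookkeeping, with is_valid standing in for the hash, difficulty, and blacklist checks:

MAX_QUEUE_SIZE = 1000000   # cap on distinct queued hashes
MAX_PEERS_PER_BLOCK = 10   # cap on recorded sources per hash

def queue_new_blocks(block_queue, advertised_hashes, peer, existing_blocks, is_valid):
    # block_queue: dict of block hash -> list of peers that advertise it
    for block_hash in advertised_hashes:
        if block_hash in existing_blocks or not is_valid(block_hash):
            continue
        if block_hash not in block_queue:
            if len(block_queue) <= MAX_QUEUE_SIZE:
                block_queue[block_hash] = [peer]  # first peer we heard this block from
        elif peer not in block_queue[block_hash] and len(block_queue[block_hash]) < MAX_PEERS_PER_BLOCK:
            block_queue[block_hash].append(peer)  # remember another source to try

queue = {}
queue_new_blocks(queue, ['aa', 'bb'], 'peer1.onion', existing_blocks=set(), is_valid=lambda h: True)
queue_new_blocks(queue, ['aa'], 'peer2.onion', existing_blocks=set(), is_valid=lambda h: True)
assert queue == {'aa': ['peer1.onion', 'peer2.onion'], 'bb': ['peer1.onion']}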

View File

@@ -222,18 +222,8 @@ class Core:
c.execute('Delete from hashes where hash=?;', t)
conn.commit()
conn.close()
blockFile = self.dataDir + '/blocks/%s.dat' % block
dataSize = 0
try:
''' Get size of data when loaded as an object/var, rather than on disk,
to avoid conflict with getsizeof when saving blocks
'''
with open(blockFile, 'r') as data:
dataSize = sys.getsizeof(data.read())
self._utils.storageCounter.removeBytes(dataSize)
os.remove(blockFile)
except FileNotFoundError:
pass
dataSize = sys.getsizeof(onionrstorage.getData(self, block))
self._utils.storageCounter.removeBytes(dataSize)
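The replacement measures the block with sys.getsizeof on the loaded data, the same way the counter was charged when the block was stored, so additions and removals stay symmetric (the removed docstring made the same point about avoiding a conflict with getsizeof at save time). A simplified stand-in for such a byte counter, not the real storageCounter implementation:

import sys

class StorageCounter:
    '''Track approximate bytes used by stored blocks against a fixed quota.'''
    def __init__(self, limit_bytes):
        self.limit_bytes = limit_bytes
        self.used_bytes = 0

    def add_bytes(self, amount):
        # Refuse the write if it would push us past the quota.
        if self.used_bytes + amount > self.limit_bytes:
            return False
        self.used_bytes += amount
        return self.used_bytes

    def remove_bytes(self, amount):
        self.used_bytes = max(0, self.used_bytes - amount)
        return self.used_bytes

counter = StorageCounter(limit_bytes=10000000)
data = b'example block data'
size = sys.getsizeof(data)    # measure the loaded object, exactly as at store time
counter.add_bytes(size)
counter.remove_bytes(size)    # deleting the block releases the same amount
assert counter.used_bytes == 0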
def createAddressDB(self):
'''
@@ -317,9 +307,6 @@ class Core:
#raise Exception("Data is already set for " + dataHash)
else:
if self._utils.storageCounter.addBytes(dataSize) != False:
#blockFile = open(blockFileName, 'wb')
#blockFile.write(data)
#blockFile.close()
onionrstorage.store(self, data, blockHash=dataHash)
conn = sqlite3.connect(self.blockDB, timeout=30)
c = conn.cursor()
@@ -558,19 +545,18 @@ class Core:
knownPeer text, 2
speed int, 3
success int, 4
DBHash text, 5
powValue 6
failure int 7
lastConnect 8
trust 9
introduced 10
powValue 5
failure int 6
lastConnect 7
trust 8
introduced 9
'''
conn = sqlite3.connect(self.addressDB, timeout=30)
c = conn.cursor()
command = (address,)
infoNumbers = {'address': 0, 'type': 1, 'knownPeer': 2, 'speed': 3, 'success': 4, 'DBHash': 5, 'powValue': 6, 'failure': 7, 'lastConnect': 8, 'trust': 9, 'introduced': 10}
infoNumbers = {'address': 0, 'type': 1, 'knownPeer': 2, 'speed': 3, 'success': 4, 'powValue': 5, 'failure': 6, 'lastConnect': 7, 'trust': 8, 'introduced': 9}
info = infoNumbers[info]
iterCount = 0
retVal = ''
@@ -596,7 +582,7 @@ class Core:
command = (data, address)
if key not in ('address', 'type', 'knownPeer', 'speed', 'success', 'DBHash', 'failure', 'powValue', 'lastConnect', 'lastConnectAttempt', 'trust', 'introduced'):
if key not in ('address', 'type', 'knownPeer', 'speed', 'success', 'failure', 'powValue', 'lastConnect', 'lastConnectAttempt', 'trust', 'introduced'):
raise Exception("Got invalid database key when setting address info")
else:
c.execute('UPDATE adders SET ' + key + ' = ? WHERE address=?', command)
@@ -681,19 +667,6 @@ class Core:
conn.close()
return rows
def setBlockType(self, hash, blockType):
'''
Sets the type of block
'''
conn = sqlite3.connect(self.blockDB, timeout=30)
c = conn.cursor()
c.execute("UPDATE hashes SET dataType = ? WHERE hash = ?;", (blockType, hash))
conn.commit()
conn.close()
return
def updateBlockInfo(self, hash, key, data):
'''
sets info associated with a block

View File

@@ -39,7 +39,6 @@ class DBCreator:
knownPeer text,
speed int,
success int,
DBHash text,
powValue text,
failure int,
lastConnect int,

View File

@@ -43,7 +43,7 @@ except ImportError:
ONIONR_TAGLINE = 'Anonymous P2P Platform - GPLv3 - https://Onionr.net'
ONIONR_VERSION = '0.5.0' # for debugging and stuff
ONIONR_VERSION_TUPLE = tuple(ONIONR_VERSION.split('.')) # (MAJOR, MINOR, VERSION)
API_VERSION = '5' # increments of 1; only change when something fundemental about how the API works changes. This way other nodes know how to communicate without learning too much information about you.
API_VERSION = '5' # increments of 1; only change when something fundamental about how the API works changes. This way other nodes know how to communicate without learning too much information about you.
class Onionr:
def __init__(self):
@@ -587,7 +587,6 @@ class Onionr:
logger.info("Successfully added address.")
else:
logger.warn("Unable to add address.")
return
def addMessage(self, header="txt"):

View File

@@ -116,4 +116,7 @@ class OnionrBlackList:
return
insert = (hashed,)
blacklistDate = self._core._utils.getEpoch()
self._dbExecute("INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);", (str(hashed), dataType, blacklistDate, expire))
try:
self._dbExecute("INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);", (str(hashed), dataType, blacklistDate, expire))
except sqlite3.IntegrityError:
pass
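Catching sqlite3.IntegrityError turns re-blacklisting an already-listed hash into a no-op instead of a crash, assuming the hash column carries a UNIQUE or primary key constraint (the table definition is not shown in this diff). A self-contained illustration of the pattern:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE blacklist (hash TEXT UNIQUE, dataType INT, blacklistDate INT, expire INT)')

def add_to_blacklist(hashed, data_type=0, date=0, expire=0):
    try:
        conn.execute('INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES (?, ?, ?, ?)',
                     (hashed, data_type, date, expire))
        conn.commit()
    except sqlite3.IntegrityError:
        pass  # already blacklisted; a second insert violates the UNIQUE constraint

add_to_blacklist('deadbeef')
add_to_blacklist('deadbeef')  # silently ignored rather than raising
assert conn.execute('SELECT COUNT(*) FROM blacklist').fetchone()[0] == 1

An INSERT OR IGNORE statement would achieve the same result without the try/except.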

View File

@@ -55,6 +55,21 @@ def _dbFetch(coreInst, blockHash):
conn.close()
return None
def deleteBlock(coreInst, blockHash):
# You should call core.removeBlock if you want the storage byte count to be decremented automatically
assert isinstance(coreInst, core.Core)
if os.path.exists('%s/%s.dat' % (coreInst.blockDataLocation, blockHash)):
os.remove('%s/%s.dat' % (coreInst.blockDataLocation, blockHash))
return True
dbCreate(coreInst)
conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
c = conn.cursor()
data = (blockHash,)
c.execute('DELETE FROM blockData where hash = ?', data)
conn.commit()
conn.close()
return True
def store(coreInst, data, blockHash=''):
assert isinstance(coreInst, core.Core)
assert coreInst._utils.validateHash(blockHash)

View File

@@ -194,21 +194,6 @@ class OnionrUtils:
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
def getBlockDBHash(self):
'''
Return a sha3_256 hash of the blocks DB
'''
try:
with open(self._core.blockDB, 'rb') as data:
data = data.read()
hasher = hashlib.sha3_256()
hasher.update(data)
dataHash = hasher.hexdigest()
return dataHash
except Exception as error:
logger.error('Failed to get block DB hash.', error=error)
def hasBlock(self, hash):
'''
Check for new block in the list
@@ -334,15 +319,6 @@ class OnionrUtils:
retVal = True
return retVal
def isIntegerString(self, data):
'''Check if a string is a valid base10 integer (also returns true if already an int)'''
try:
int(data)
except ValueError:
return False
else:
return True
def validateID(self, id):
'''
Validate if an address is a valid tor or i2p hidden service
@@ -402,6 +378,15 @@ class OnionrUtils:
except:
return False
def isIntegerString(self, data):
'''Check if a string is a valid base10 integer (also returns true if already an int)'''
try:
int(data)
except ValueError:
return False
else:
return True
def isCommunicatorRunning(self, timeout = 5, interval = 0.1):
try:
runcheck_file = self._core.dataDir + '.runcheck'

View File

@@ -28,11 +28,11 @@ def mergeAdders(newAdderList, coreInst):
for adder in newAdderList.split(','):
adder = adder.strip()
if not adder in coreInst.listAdders(randomOrder = False) and adder != coreInst.hsAddress and not coreInst._blacklist.inBlacklist(adder):
if not config.get('tor.v3onions') and len(adder) == 62:
if not coreInst.config.get('tor.v3onions') and len(adder) == 62:
continue
if coreInst.addAddress(adder):
# Check if we have the maximum amount of allowed stored peers
if config.get('peers.max_stored_peers') > len(coreInst.listAdders()):
if coreInst.config.get('peers.max_stored_peers') > len(coreInst.listAdders()):
logger.info('Added %s to db.' % adder, timestamp = True)
retVal = True
else: